mirror of https://github.com/OpenIPC/firmware.git
6616 lines
205 KiB
Diff
diff -drupN a/drivers/android/binder.c b/drivers/android/binder.c
--- a/drivers/android/binder.c	2018-08-06 17:23:04.000000000 +0300
+++ b/drivers/android/binder.c	2022-06-12 05:28:14.000000000 +0300
@@ -15,6 +15,40 @@
 *
 */

+/*
+ * Locking overview
+ *
+ * There are 3 main spinlocks which must be acquired in the
+ * order shown:
+ *
+ * 1) proc->outer_lock : protects binder_ref
+ *    binder_proc_lock() and binder_proc_unlock() are
+ *    used to acq/rel.
+ * 2) node->lock : protects most fields of binder_node.
+ *    binder_node_lock() and binder_node_unlock() are
+ *    used to acq/rel
+ * 3) proc->inner_lock : protects the thread and node lists
+ *    (proc->threads, proc->waiting_threads, proc->nodes)
+ *    and all todo lists associated with the binder_proc
+ *    (proc->todo, thread->todo, proc->delivered_death and
+ *    node->async_todo), as well as thread->transaction_stack
+ *    binder_inner_proc_lock() and binder_inner_proc_unlock()
+ *    are used to acq/rel
+ *
+ * Any lock under procA must never be nested under any lock at the same
+ * level or below on procB.
+ *
+ * Functions that require a lock held on entry indicate which lock
+ * in the suffix of the function name:
+ *
+ * foo_olocked() : requires node->outer_lock
+ * foo_nlocked() : requires node->lock
+ * foo_ilocked() : requires proc->inner_lock
+ * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
+ * foo_nilocked(): requires node->lock and proc->inner_lock
+ * ...
+ */
+
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
@@ -24,7 +58,6 @@
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
-#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
@@ -34,31 +67,31 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
+#include <linux/spinlock.h>

#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
+#include "binder_alloc.h"
#include "binder_trace.h"

-static DEFINE_MUTEX(binder_main_lock);
+static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);
-static DEFINE_MUTEX(binder_mmap_lock);

+static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
-static HLIST_HEAD(binder_deferred_list);
+static DEFINE_MUTEX(binder_procs_lock);
+
static HLIST_HEAD(binder_dead_nodes);
+static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
-static struct binder_node *binder_context_mgr_node;
-static kuid_t binder_context_mgr_uid = INVALID_UID;
-static int binder_last_id;
+static atomic_t binder_last_id;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
@@ -104,22 +137,21 @@ enum {
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
-	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 13,
-	BINDER_DEBUG_PRIORITY_CAP           = 1U << 14,
-	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 15,
+	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
+	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

-static bool binder_debug_no_lock;
-module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
+static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
+module_param_named(devices, binder_devices_param, charp, S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
-				   struct kernel_param *kp)
+					 const struct kernel_param *kp)
{
	int ret;

@@ -145,6 +177,17 @@ module_param_call(stop_on_user_error, bi
			binder_stop_on_user_error = 2; \
	} while (0)

+#define to_flat_binder_object(hdr) \
+	container_of(hdr, struct flat_binder_object, hdr)
+
+#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
+
+#define to_binder_buffer_object(hdr) \
+	container_of(hdr, struct binder_buffer_object, hdr)
+
+#define to_binder_fd_array_object(hdr) \
+	container_of(hdr, struct binder_fd_array_object, hdr)
+
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
@@ -157,26 +200,27 @@ enum binder_stat_types {
};

struct binder_stats {
-	int br[_IOC_NR(BR_FAILED_REPLY) + 1];
-	int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
-	int obj_created[BINDER_STAT_COUNT];
-	int obj_deleted[BINDER_STAT_COUNT];
+	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
+	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
+	atomic_t obj_created[BINDER_STAT_COUNT];
+	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
-	binder_stats.obj_deleted[type]++;
+	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
-	binder_stats.obj_created[type]++;
+	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
+	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
@@ -186,10 +230,14 @@ struct binder_transaction_log_entry {
	int to_node;
	int data_size;
	int offsets_size;
+	int return_error_line;
+	uint32_t return_error;
+	uint32_t return_error_param;
+	const char *context_name;
};
struct binder_transaction_log {
-	int next;
-	int full;
+	atomic_t cur;
+	bool full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
@@ -199,22 +247,50 @@ static struct binder_transaction_log_ent
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
+	unsigned int cur = atomic_inc_return(&log->cur);

-	e = &log->entry[log->next];
-	memset(e, 0, sizeof(*e));
-	log->next++;
-	if (log->next == ARRAY_SIZE(log->entry)) {
-		log->next = 0;
+	if (cur >= ARRAY_SIZE(log->entry))
		log->full = 1;
-	}
+	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
+	WRITE_ONCE(e->debug_id_done, 0);
+	/*
+	 * write-barrier to synchronize access to e->debug_id_done.
+	 * We make sure the initialized 0 value is seen before
+	 * memset() other fields are zeroed by memset.
+	 */
+	smp_wmb();
+	memset(e, 0, sizeof(*e));
	return e;
}

+struct binder_context {
+	struct binder_node *binder_context_mgr_node;
+	struct mutex context_mgr_node_lock;
+
+	kuid_t binder_context_mgr_uid;
+	const char *name;
+};
+
+struct binder_device {
+	struct hlist_node hlist;
+	struct miscdevice miscdev;
+	struct binder_context context;
+};
+
+/**
+ * struct binder_work - work enqueued on a worklist
+ * @entry:             node enqueued on list
+ * @type:              type of work to be performed
+ *
+ * There are separate work lists for proc, thread, and node (async).
+ */
struct binder_work {
	struct list_head entry;
+
	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
+		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
@@ -222,8 +298,76 @@ struct binder_work {
	} type;
};

+struct binder_error {
+	struct binder_work work;
+	uint32_t cmd;
+};
+
+/**
+ * struct binder_node - binder node bookkeeping
+ * @debug_id:             unique ID for debugging
+ *                        (invariant after initialized)
+ * @lock:                 lock for node fields
+ * @work:                 worklist element for node work
+ *                        (protected by @proc->inner_lock)
+ * @rb_node:              element for proc->nodes tree
+ *                        (protected by @proc->inner_lock)
+ * @dead_node:            element for binder_dead_nodes list
+ *                        (protected by binder_dead_nodes_lock)
+ * @proc:                 binder_proc that owns this node
+ *                        (invariant after initialized)
+ * @refs:                 list of references on this node
+ *                        (protected by @lock)
+ * @internal_strong_refs: used to take strong references when
+ *                        initiating a transaction
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
+ * @local_weak_refs:      weak user refs from local process
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
+ * @local_strong_refs:    strong user refs from local process
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
+ * @tmp_refs:             temporary kernel refs
+ *                        (protected by @proc->inner_lock while @proc
+ *                        is valid, and by binder_dead_nodes_lock
+ *                        if @proc is NULL. During inc/dec and node release
+ *                        it is also protected by @lock to provide safety
+ *                        as the node dies and @proc becomes NULL)
+ * @ptr:                  userspace pointer for node
+ *                        (invariant, no lock needed)
+ * @cookie:               userspace cookie for node
+ *                        (invariant, no lock needed)
+ * @has_strong_ref:       userspace notified of strong ref
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
+ * @pending_strong_ref:   userspace has acked notification of strong ref
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
+ * @has_weak_ref:         userspace notified of weak ref
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
+ * @pending_weak_ref:     userspace has acked notification of weak ref
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
+ * @has_async_transaction: async transaction to node in progress
+ *                        (protected by @lock)
+ * @sched_policy:         minimum scheduling policy for node
+ *                        (invariant after initialized)
+ * @accept_fds:           file descriptor operations supported for node
+ *                        (invariant after initialized)
+ * @min_priority:         minimum scheduling priority
+ *                        (invariant after initialized)
+ * @inherit_rt:           inherit RT scheduling policy from caller
+ *                        (invariant after initialized)
+ * @async_todo:           list of async work items
+ *                        (protected by @proc->inner_lock)
+ *
+ * Bookkeeping structure for binder nodes.
+ */
struct binder_node {
	int debug_id;
+	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
@@ -234,87 +378,186 @@ struct binder_node {
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
+	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
-	unsigned has_strong_ref:1;
-	unsigned pending_strong_ref:1;
-	unsigned has_weak_ref:1;
-	unsigned pending_weak_ref:1;
-	unsigned has_async_transaction:1;
-	unsigned accept_fds:1;
-	unsigned min_priority:8;
+	struct {
+		/*
+		 * bitfield elements protected by
+		 * proc inner_lock
+		 */
+		u8 has_strong_ref:1;
+		u8 pending_strong_ref:1;
+		u8 has_weak_ref:1;
+		u8 pending_weak_ref:1;
+	};
+	struct {
+		/*
+		 * invariant after initialization
+		 */
+		u8 sched_policy:2;
+		u8 inherit_rt:1;
+		u8 accept_fds:1;
+		u8 min_priority;
+	};
+	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
+	/**
+	 * @work: worklist element for death notifications
+	 *        (protected by inner_lock of the proc that
+	 *        this ref belongs to)
+	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};

+/**
+ * struct binder_ref_data - binder_ref counts and id
+ * @debug_id:        unique ID for the ref
+ * @desc:            unique userspace handle for ref
+ * @strong:          strong ref count (debugging only if not locked)
+ * @weak:            weak ref count (debugging only if not locked)
+ *
+ * Structure to hold ref count and ref id information. Since
+ * the actual ref can only be accessed with a lock, this structure
+ * is used to return information about the ref to callers of
+ * ref inc/dec functions.
+ */
+struct binder_ref_data {
+	int debug_id;
+	uint32_t desc;
+	int strong;
+	int weak;
+};
+
+/**
+ * struct binder_ref - struct to track references on nodes
+ * @data:        binder_ref_data containing id, handle, and current refcounts
+ * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
+ * @rb_node_node: node for lookup by @node in proc's rb_tree
+ * @node_entry:  list entry for node->refs list in target node
+ *               (protected by @node->lock)
+ * @proc:        binder_proc containing ref
+ * @node:        binder_node of target node. When cleaning up a
+ *               ref for deletion in binder_cleanup_ref, a non-NULL
+ *               @node indicates the node must be freed
+ * @death:       pointer to death notification (ref_death) if requested
+ *               (protected by @node->lock)
+ *
+ * Structure to track references from procA to target node (on procB). This
+ * structure is unsafe to access without holding @proc->outer_lock.
+ */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
-	int debug_id;
+	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
-	uint32_t desc;
-	int strong;
-	int weak;
	struct binder_ref_death *death;
};

-struct binder_buffer {
-	struct list_head entry; /* free and allocated entries by address */
-	struct rb_node rb_node; /* free entry by size or allocated entry */
-				/* by address */
-	unsigned free:1;
-	unsigned allow_user_free:1;
-	unsigned async_transaction:1;
-	unsigned debug_id:29;
-
-	struct binder_transaction *transaction;
-
-	struct binder_node *target_node;
-	size_t data_size;
-	size_t offsets_size;
-	uint8_t data[0];
-};
-
enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};

+/**
+ * struct binder_priority - scheduler policy and priority
+ * @sched_policy            scheduler policy
+ * @prio                    [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
+ *
+ * The binder driver supports inheriting the following scheduler policies:
+ * SCHED_NORMAL
+ * SCHED_BATCH
+ * SCHED_FIFO
+ * SCHED_RR
+ */
+struct binder_priority {
+	unsigned int sched_policy;
+	int prio;
+};
+
+/**
+ * struct binder_proc - binder process bookkeeping
+ * @proc_node:            element for binder_procs list
+ * @threads:              rbtree of binder_threads in this proc
+ *                        (protected by @inner_lock)
+ * @nodes:                rbtree of binder nodes associated with
+ *                        this proc ordered by node->ptr
+ *                        (protected by @inner_lock)
+ * @refs_by_desc:         rbtree of refs ordered by ref->desc
+ *                        (protected by @outer_lock)
+ * @refs_by_node:         rbtree of refs ordered by ref->node
+ *                        (protected by @outer_lock)
+ * @waiting_threads:      threads currently waiting for proc work
+ *                        (protected by @inner_lock)
+ * @pid                   PID of group_leader of process
+ *                        (invariant after initialized)
+ * @tsk                   task_struct for group_leader of process
+ *                        (invariant after initialized)
+ * @files                 files_struct for process
+ *                        (invariant after initialized)
+ * @deferred_work_node:   element for binder_deferred_list
+ *                        (protected by binder_deferred_lock)
+ * @deferred_work:        bitmap of deferred work to perform
+ *                        (protected by binder_deferred_lock)
+ * @is_dead:              process is dead and awaiting free
+ *                        when outstanding transactions are cleaned up
+ *                        (protected by @inner_lock)
+ * @todo:                 list of work for this process
+ *                        (protected by @inner_lock)
+ * @wait:                 wait queue head to wait for proc work
+ *                        (invariant after initialized)
+ * @stats:                per-process binder statistics
+ *                        (atomics, no lock needed)
+ * @delivered_death:      list of delivered death notification
+ *                        (protected by @inner_lock)
+ * @max_threads:          cap on number of binder threads
+ *                        (protected by @inner_lock)
+ * @requested_threads:    number of binder threads requested but not
+ *                        yet started. In current implementation, can
+ *                        only be 0 or 1.
+ *                        (protected by @inner_lock)
+ * @requested_threads_started: number binder threads started
+ *                        (protected by @inner_lock)
+ * @tmp_ref:              temporary reference to indicate proc is in use
+ *                        (protected by @inner_lock)
+ * @default_priority:     default scheduler priority
+ *                        (invariant after initialized)
+ * @debugfs_entry:        debugfs node
+ * @alloc:                binder allocator bookkeeping
+ * @context:              binder_context for this proc
+ *                        (invariant after initialized)
+ * @inner_lock:           can nest under outer_lock and/or node lock
+ * @outer_lock:           no nesting under innor or node lock
+ *                        Lock order: 1) outer, 2) node, 3) inner
+ *
+ * Bookkeeping structure for binder processes
+ */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
+	struct list_head waiting_threads;
	int pid;
-	struct vm_area_struct *vma;
-	struct mm_struct *vma_vm_mm;
	struct task_struct *tsk;
	struct files_struct *files;
+	struct mutex files_lock;
	struct hlist_node deferred_work_node;
	int deferred_work;
-	void *buffer;
-	ptrdiff_t user_buffer_offset;
-
-	struct list_head buffers;
-	struct rb_root free_buffers;
-	struct rb_root allocated_buffers;
-	size_t free_async_space;
+	bool is_dead;

-	struct page **pages;
-	size_t buffer_size;
-	uint32_t buffer_free;
	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
@@ -322,9 +565,13 @@ struct binder_proc {
	int max_threads;
	int requested_threads;
	int requested_threads_started;
-	int ready_threads;
-	long default_priority;
+	int tmp_ref;
+	struct binder_priority default_priority;
	struct dentry *debugfs_entry;
+	struct binder_alloc alloc;
+	struct binder_context *context;
+	spinlock_t inner_lock;
+	spinlock_t outer_lock;
};

enum {
@@ -333,22 +580,60 @@ enum {
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
-	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
+	BINDER_LOOPER_STATE_POLL        = 0x20,
};

+/**
+ * struct binder_thread - binder thread bookkeeping
+ * @proc:                 binder process for this thread
+ *                        (invariant after initialization)
+ * @rb_node:              element for proc->threads rbtree
+ *                        (protected by @proc->inner_lock)
+ * @waiting_thread_node:  element for @proc->waiting_threads list
+ *                        (protected by @proc->inner_lock)
+ * @pid:                  PID for this thread
+ *                        (invariant after initialization)
+ * @looper:               bitmap of looping state
+ *                        (only accessed by this thread)
+ * @looper_needs_return:  looping thread needs to exit driver
+ *                        (no lock needed)
+ * @transaction_stack:    stack of in-progress transactions for this thread
+ *                        (protected by @proc->inner_lock)
+ * @todo:                 list of work to do for this thread
+ *                        (protected by @proc->inner_lock)
+ * @return_error:         transaction errors reported by this thread
+ *                        (only accessed by this thread)
+ * @reply_error:          transaction errors reported by target thread
+ *                        (protected by @proc->inner_lock)
+ * @wait:                 wait queue for thread work
+ * @stats:                per-thread statistics
+ *                        (atomics, no lock needed)
+ * @tmp_ref:              temporary reference to indicate thread is in use
+ *                        (atomic since @proc->inner_lock cannot
+ *                        always be acquired)
+ * @is_dead:              thread is dead and awaiting free
+ *                        when outstanding transactions are cleaned up
+ *                        (protected by @proc->inner_lock)
+ * @task:                 struct task_struct for this thread
+ *
+ * Bookkeeping structure for binder threads.
+ */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
+	struct list_head waiting_thread_node;
	int pid;
-	int looper;
+	int looper;              /* only modified by this thread */
+	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
-	uint32_t return_error; /* Write failed, return error code in read buf */
-	uint32_t return_error2; /* Write failed, return error code in read */
-	/* buffer. Used when sending a reply to a dead process that */
-	/* we are also waiting on */
+	struct binder_error return_error;
+	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
+	atomic_t tmp_ref;
+	bool is_dead;
+	struct task_struct *task;
};

struct binder_transaction {
@@ -365,30 +650,280 @@ struct binder_transaction {
	struct binder_buffer *buffer;
	unsigned int    code;
	unsigned int    flags;
-	long    priority;
-	long    saved_priority;
+	struct binder_priority  priority;
+	struct binder_priority  saved_priority;
+	bool    set_priority_called;
	kuid_t  sender_euid;
+	/**
+	 * @lock:  protects @from, @to_proc, and @to_thread
+	 *
+	 * @from, @to_proc, and @to_thread can be set to NULL
+	 * during thread teardown
+	 */
+	spinlock_t lock;
};

+/**
+ * binder_proc_lock() - Acquire outer lock for given binder_proc
+ * @proc:         struct binder_proc to acquire
+ *
+ * Acquires proc->outer_lock. Used to protect binder_ref
+ * structures associated with the given proc.
+ */
+#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
+static void
+_binder_proc_lock(struct binder_proc *proc, int line)
+{
+	binder_debug(BINDER_DEBUG_SPINLOCKS,
+		     "%s: line=%d\n", __func__, line);
+	spin_lock(&proc->outer_lock);
+}
+
+/**
+ * binder_proc_unlock() - Release spinlock for given binder_proc
+ * @proc:         struct binder_proc to acquire
+ *
+ * Release lock acquired via binder_proc_lock()
+ */
+#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
+static void
+_binder_proc_unlock(struct binder_proc *proc, int line)
+{
+	binder_debug(BINDER_DEBUG_SPINLOCKS,
+		     "%s: line=%d\n", __func__, line);
+	spin_unlock(&proc->outer_lock);
+}
+
+/**
+ * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
+ * @proc:         struct binder_proc to acquire
+ *
+ * Acquires proc->inner_lock. Used to protect todo lists
+ */
+#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
+static void
+_binder_inner_proc_lock(struct binder_proc *proc, int line)
+{
+	binder_debug(BINDER_DEBUG_SPINLOCKS,
+		     "%s: line=%d\n", __func__, line);
+	spin_lock(&proc->inner_lock);
+}
+
+/**
+ * binder_inner_proc_unlock() - Release inner lock for given binder_proc
+ * @proc:         struct binder_proc to acquire
+ *
+ * Release lock acquired via binder_inner_proc_lock()
+ */
+#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
+static void
+_binder_inner_proc_unlock(struct binder_proc *proc, int line)
+{
+	binder_debug(BINDER_DEBUG_SPINLOCKS,
+		     "%s: line=%d\n", __func__, line);
+	spin_unlock(&proc->inner_lock);
+}
+
+/**
+ * binder_node_lock() - Acquire spinlock for given binder_node
+ * @node:         struct binder_node to acquire
+ *
+ * Acquires node->lock. Used to protect binder_node fields
+ */
+#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
+static void
+_binder_node_lock(struct binder_node *node, int line)
+{
+	binder_debug(BINDER_DEBUG_SPINLOCKS,
+		     "%s: line=%d\n", __func__, line);
+	spin_lock(&node->lock);
+}
+
+/**
+ * binder_node_unlock() - Release spinlock for given binder_proc
+ * @node:         struct binder_node to acquire
+ *
+ * Release lock acquired via binder_node_lock()
+ */
+#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
+static void
+_binder_node_unlock(struct binder_node *node, int line)
+{
+	binder_debug(BINDER_DEBUG_SPINLOCKS,
+		     "%s: line=%d\n", __func__, line);
+	spin_unlock(&node->lock);
+}
+
+/**
+ * binder_node_inner_lock() - Acquire node and inner locks
+ * @node:         struct binder_node to acquire
+ *
+ * Acquires node->lock. If node->proc also acquires
+ * proc->inner_lock. Used to protect binder_node fields
+ */
+#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
+static void
+_binder_node_inner_lock(struct binder_node *node, int line)
+{
+	binder_debug(BINDER_DEBUG_SPINLOCKS,
+		     "%s: line=%d\n", __func__, line);
+	spin_lock(&node->lock);
+	if (node->proc)
+		binder_inner_proc_lock(node->proc);
+}
+
+/**
+ * binder_node_unlock() - Release node and inner locks
+ * @node:         struct binder_node to acquire
+ *
+ * Release lock acquired via binder_node_lock()
+ */
+#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
+static void
+_binder_node_inner_unlock(struct binder_node *node, int line)
+{
+	struct binder_proc *proc = node->proc;
+
+	binder_debug(BINDER_DEBUG_SPINLOCKS,
+		     "%s: line=%d\n", __func__, line);
+	if (proc)
+		binder_inner_proc_unlock(proc);
+	spin_unlock(&node->lock);
+}
+
+static bool binder_worklist_empty_ilocked(struct list_head *list)
+{
+	return list_empty(list);
+}
+
+/**
+ * binder_worklist_empty() - Check if no items on the work list
+ * @proc:       binder_proc associated with list
+ * @list:       list to check
+ *
+ * Return: true if there are no items on list, else false
+ */
+static bool binder_worklist_empty(struct binder_proc *proc,
+				  struct list_head *list)
+{
+	bool ret;
+
+	binder_inner_proc_lock(proc);
+	ret = binder_worklist_empty_ilocked(list);
+	binder_inner_proc_unlock(proc);
+	return ret;
+}
+
+static void
+binder_enqueue_work_ilocked(struct binder_work *work,
+			   struct list_head *target_list)
+{
+	BUG_ON(target_list == NULL);
+	BUG_ON(work->entry.next && !list_empty(&work->entry));
+	list_add_tail(&work->entry, target_list);
+}
+
+/**
+ * binder_enqueue_work() - Add an item to the work list
+ * @proc:         binder_proc associated with list
+ * @work:         struct binder_work to add to list
+ * @target_list:  list to add work to
+ *
+ * Adds the work to the specified list. Asserts that work
+ * is not already on a list.
+ */
+static void
+binder_enqueue_work(struct binder_proc *proc,
+		    struct binder_work *work,
+		    struct list_head *target_list)
+{
+	binder_inner_proc_lock(proc);
+	binder_enqueue_work_ilocked(work, target_list);
+	binder_inner_proc_unlock(proc);
+}
+
+static void
+binder_dequeue_work_ilocked(struct binder_work *work)
+{
+	list_del_init(&work->entry);
+}
+
+/**
+ * binder_dequeue_work() - Removes an item from the work list
+ * @proc:         binder_proc associated with list
+ * @work:         struct binder_work to remove from list
+ *
+ * Removes the specified work item from whatever list it is on.
+ * Can safely be called if work is not on any list.
+ */
+static void
+binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
+{
+	binder_inner_proc_lock(proc);
+	binder_dequeue_work_ilocked(work);
+	binder_inner_proc_unlock(proc);
+}
+
+static struct binder_work *binder_dequeue_work_head_ilocked(
+					struct list_head *list)
+{
+	struct binder_work *w;
+
+	w = list_first_entry_or_null(list, struct binder_work, entry);
+	if (w)
+		list_del_init(&w->entry);
+	return w;
+}
+
+/**
+ * binder_dequeue_work_head() - Dequeues the item at head of list
+ * @proc:         binder_proc associated with list
+ * @list:         list to dequeue head
+ *
+ * Removes the head of the list if there are items on the list
+ *
+ * Return: pointer dequeued binder_work, NULL if list was empty
+ */
+static struct binder_work *binder_dequeue_work_head(
+					struct binder_proc *proc,
+					struct list_head *list)
+{
+	struct binder_work *w;
+
+	binder_inner_proc_lock(proc);
+	w = binder_dequeue_work_head_ilocked(list);
+	binder_inner_proc_unlock(proc);
+	return w;
+}
+
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
+static void binder_free_thread(struct binder_thread *thread);
+static void binder_free_proc(struct binder_proc *proc);
+static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
-	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;
+	int ret;

-	if (files == NULL)
-		return -ESRCH;
-
-	if (!lock_task_sighand(proc->tsk, &irqs))
-		return -EMFILE;
-
+	mutex_lock(&proc->files_lock);
+	if (proc->files == NULL) {
+		ret = -ESRCH;
+		goto err;
+	}
+	if (!lock_task_sighand(proc->tsk, &irqs)) {
+		ret = -EMFILE;
+		goto err;
+	}
	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

-	return __alloc_fd(files, 0, rlim_cur, flags);
+	ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
+err:
+	mutex_unlock(&proc->files_lock);
+	return ret;
}

/*
@@ -397,8 +932,10 @@ static int task_get_unused_fd_flags(stru
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
+	mutex_lock(&proc->files_lock);
	if (proc->files)
		__fd_install(proc->files, fd, file);
+	mutex_unlock(&proc->files_lock);
}

/*
@@ -408,9 +945,11 @@ static long task_close_fd(struct binder_
{
	int retval;

-	if (proc->files == NULL)
-		return -ESRCH;
-
+	mutex_lock(&proc->files_lock);
+	if (proc->files == NULL) {
+		retval = -ESRCH;
+		goto err;
+	}
	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
@@ -418,457 +957,286 @@ static long task_close_fd(struct binder_
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;
-
+err:
+	mutex_unlock(&proc->files_lock);
	return retval;
}

-static inline void binder_lock(const char *tag)
+static bool binder_has_work_ilocked(struct binder_thread *thread,
+				    bool do_proc_work)
{
-	trace_binder_lock(tag);
-	mutex_lock(&binder_main_lock);
-	trace_binder_locked(tag);
+	return !binder_worklist_empty_ilocked(&thread->todo) ||
+		thread->looper_need_return ||
+		(do_proc_work &&
+		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

-static inline void binder_unlock(const char *tag)
+static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
-	trace_binder_unlock(tag);
-	mutex_unlock(&binder_main_lock);
-}
+	bool has_work;

-static void binder_set_nice(long nice)
-{
-	long min_nice;
+	binder_inner_proc_lock(thread->proc);
+	has_work = binder_has_work_ilocked(thread, do_proc_work);
+	binder_inner_proc_unlock(thread->proc);

-	if (can_nice(current, nice)) {
-		set_user_nice(current, nice);
-		return;
-	}
-	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
-	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
-		     "%d: nice value %ld not allowed use %ld instead\n",
-		      current->pid, nice, min_nice);
-	set_user_nice(current, min_nice);
-	if (min_nice <= MAX_NICE)
-		return;
-	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
+	return has_work;
}

-static size_t binder_buffer_size(struct binder_proc *proc,
-				 struct binder_buffer *buffer)
+static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
-	if (list_is_last(&buffer->entry, &proc->buffers))
-		return proc->buffer + proc->buffer_size - (void *)buffer->data;
-	return (size_t)list_entry(buffer->entry.next,
-			  struct binder_buffer, entry) - (size_t)buffer->data;
+	return !thread->transaction_stack &&
+		binder_worklist_empty_ilocked(&thread->todo) &&
+		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
+				   BINDER_LOOPER_STATE_REGISTERED));
}

-static void binder_insert_free_buffer(struct binder_proc *proc,
-				      struct binder_buffer *new_buffer)
+static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
+					       bool sync)
{
-	struct rb_node **p = &proc->free_buffers.rb_node;
-	struct rb_node *parent = NULL;
-	struct binder_buffer *buffer;
-	size_t buffer_size;
-	size_t new_buffer_size;
-
-	BUG_ON(!new_buffer->free);
-
-	new_buffer_size = binder_buffer_size(proc, new_buffer);
-
-	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-		     "%d: add free buffer, size %zd, at %p\n",
-		      proc->pid, new_buffer_size, new_buffer);
-
-	while (*p) {
-		parent = *p;
-		buffer = rb_entry(parent, struct binder_buffer, rb_node);
-		BUG_ON(!buffer->free);
-
-		buffer_size = binder_buffer_size(proc, buffer);
+	struct rb_node *n;
+	struct binder_thread *thread;

-		if (new_buffer_size < buffer_size)
-			p = &parent->rb_left;
-		else
-			p = &parent->rb_right;
+	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
+		thread = rb_entry(n, struct binder_thread, rb_node);
+		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
+		    binder_available_for_proc_work_ilocked(thread)) {
+			if (sync)
+				wake_up_interruptible_sync(&thread->wait);
+			else
+				wake_up_interruptible(&thread->wait);
+		}
	}
-	rb_link_node(&new_buffer->rb_node, parent, p);
-	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
}

-static void binder_insert_allocated_buffer(struct binder_proc *proc,
-					   struct binder_buffer *new_buffer)
+/**
+ * binder_select_thread_ilocked() - selects a thread for doing proc work.
+ * @proc:	process to select a thread from
+ *
+ * Note that calling this function moves the thread off the waiting_threads
+ * list, so it can only be woken up by the caller of this function, or a
+ * signal. Therefore, callers *should* always wake up the thread this function
+ * returns.
+ *
+ * Return:	If there's a thread currently waiting for process work,
+ *		returns that thread. Otherwise returns NULL.
+ */
+static struct binder_thread *
+binder_select_thread_ilocked(struct binder_proc *proc)
{
-	struct rb_node **p = &proc->allocated_buffers.rb_node;
-	struct rb_node *parent = NULL;
-	struct binder_buffer *buffer;
+	struct binder_thread *thread;

-	BUG_ON(new_buffer->free);
+	assert_spin_locked(&proc->inner_lock);
+	thread = list_first_entry_or_null(&proc->waiting_threads,
+					  struct binder_thread,
+					  waiting_thread_node);

-	while (*p) {
-		parent = *p;
-		buffer = rb_entry(parent, struct binder_buffer, rb_node);
-		BUG_ON(buffer->free);
+	if (thread)
+		list_del_init(&thread->waiting_thread_node);

-		if (new_buffer < buffer)
-			p = &parent->rb_left;
-		else if (new_buffer > buffer)
-			p = &parent->rb_right;
-		else
-			BUG();
-	}
-	rb_link_node(&new_buffer->rb_node, parent, p);
-	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
+	return thread;
}

-static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
-						  uintptr_t user_ptr)
+/**
+ * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
+ * @proc:	process to wake up a thread in
+ * @thread:	specific thread to wake-up (may be NULL)
+ * @sync:	whether to do a synchronous wake-up
+ *
+ * This function wakes up a thread in the @proc process.
+ * The caller may provide a specific thread to wake-up in
+ * the @thread parameter. If @thread is NULL, this function
+ * will wake up threads that have called poll().
+ *
+ * Note that for this function to work as expected, callers
+ * should first call binder_select_thread() to find a thread
+ * to handle the work (if they don't have a thread already),
+ * and pass the result into the @thread parameter.
+ */
+static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
+					 struct binder_thread *thread,
+					 bool sync)
{
-	struct rb_node *n = proc->allocated_buffers.rb_node;
-	struct binder_buffer *buffer;
-	struct binder_buffer *kern_ptr;
-
-	kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
-		- offsetof(struct binder_buffer, data));
-
-	while (n) {
-		buffer = rb_entry(n, struct binder_buffer, rb_node);
-		BUG_ON(buffer->free);
+	assert_spin_locked(&proc->inner_lock);

-		if (kern_ptr < buffer)
-			n = n->rb_left;
-		else if (kern_ptr > buffer)
-			n = n->rb_right;
+	if (thread) {
+		if (sync)
+			wake_up_interruptible_sync(&thread->wait);
		else
-			return buffer;
+			wake_up_interruptible(&thread->wait);
+		return;
	}
-	return NULL;
+
+	/* Didn't find a thread waiting for proc work; this can happen
+	 * in two scenarios:
+	 * 1. All threads are busy handling transactions
+	 *    In that case, one of those threads should call back into
+	 *    the kernel driver soon and pick up this work.
+	 * 2. Threads are using the (e)poll interface, in which case
+	 *    they may be blocked on the waitqueue without having been
+	 *    added to waiting_threads. For this case, we just iterate
+	 *    over all threads not handling transaction work, and
+	 *    wake them all up. We wake all because we don't know whether
+	 *    a thread that called into (e)poll is handling non-binder
+	 *    work currently.
+	 */
+	binder_wakeup_poll_threads_ilocked(proc, sync);
}

-static int binder_update_page_range(struct binder_proc *proc, int allocate,
-				    void *start, void *end,
-				    struct vm_area_struct *vma)
+static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
-	void *page_addr;
-	unsigned long user_page_addr;
-	struct page **page;
-	struct mm_struct *mm;
-
-	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-		     "%d: %s pages %p-%p\n", proc->pid,
-		     allocate ? "allocate" : "free", start, end);
-
-	if (end <= start)
-		return 0;
-
-	trace_binder_update_page_range(proc, allocate, start, end);
-
-	if (vma)
-		mm = NULL;
-	else
-		mm = get_task_mm(proc->tsk);
-
-	if (mm) {
-		down_write(&mm->mmap_sem);
-		vma = proc->vma;
-		if (vma && mm != proc->vma_vm_mm) {
-			pr_err("%d: vma mm and task mm mismatch\n",
-				proc->pid);
-			vma = NULL;
-		}
-	}
-
-	if (allocate == 0)
-		goto free_range;
+	struct binder_thread *thread = binder_select_thread_ilocked(proc);

-	if (vma == NULL) {
-		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
-			proc->pid);
-		goto err_no_vma;
-	}
+	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
+}

-	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
-		int ret;
+static bool is_rt_policy(int policy)
+{
+	return policy == SCHED_FIFO || policy == SCHED_RR;
+}

-		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+static bool is_fair_policy(int policy)
+{
+	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
+}

-		BUG_ON(*page);
-		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
-		if (*page == NULL) {
-			pr_err("%d: binder_alloc_buf failed for page at %p\n",
-				proc->pid, page_addr);
-			goto err_alloc_page_failed;
-		}
-		ret = map_kernel_range_noflush((unsigned long)page_addr,
-					PAGE_SIZE, PAGE_KERNEL, page);
-		flush_cache_vmap((unsigned long)page_addr,
-				(unsigned long)page_addr + PAGE_SIZE);
-		if (ret != 1) {
-			pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
-			       proc->pid, page_addr);
-			goto err_map_kernel_failed;
-		}
-		user_page_addr =
-			(uintptr_t)page_addr + proc->user_buffer_offset;
-		ret = vm_insert_page(vma, user_page_addr, page[0]);
-		if (ret) {
-			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
-			       proc->pid, user_page_addr);
-			goto err_vm_insert_page_failed;
-		}
-		/* vm_insert_page does not seem to increment the refcount */
-	}
-	if (mm) {
-		up_write(&mm->mmap_sem);
-		mmput(mm);
-	}
-	return 0;
+static bool binder_supported_policy(int policy)
+{
+	return is_fair_policy(policy) || is_rt_policy(policy);
+}

-free_range:
-	for (page_addr = end - PAGE_SIZE; page_addr >= start;
-	     page_addr -= PAGE_SIZE) {
-		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
-		if (vma)
-			zap_page_range(vma, (uintptr_t)page_addr +
-				proc->user_buffer_offset, PAGE_SIZE, NULL);
-err_vm_insert_page_failed:
-		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
-err_map_kernel_failed:
-		__free_page(*page);
-		*page = NULL;
-err_alloc_page_failed:
-		;
-	}
-err_no_vma:
-	if (mm) {
-		up_write(&mm->mmap_sem);
-		mmput(mm);
-	}
-	return -ENOMEM;
+static int to_userspace_prio(int policy, int kernel_priority)
+{
+	if (is_fair_policy(policy))
+		return PRIO_TO_NICE(kernel_priority);
+	else
+		return MAX_USER_RT_PRIO - 1 - kernel_priority;
}

-static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
-					      size_t data_size,
-					      size_t offsets_size, int is_async)
+static int to_kernel_prio(int policy, int user_priority)
{
-	struct rb_node *n = proc->free_buffers.rb_node;
-	struct binder_buffer *buffer;
-	size_t buffer_size;
-	struct rb_node *best_fit = NULL;
-	void *has_page_addr;
-	void *end_page_addr;
-	size_t size;
+	if (is_fair_policy(policy))
+		return NICE_TO_PRIO(user_priority);
+	else
+		return MAX_USER_RT_PRIO - 1 - user_priority;
+}

-	if (proc->vma == NULL) {
-		pr_err("%d: binder_alloc_buf, no vma\n",
-		       proc->pid);
-		return NULL;
-	}
+static void binder_do_set_priority(struct task_struct *task,
+				   struct binder_priority desired,
+				   bool verify)
+{
+	int priority; /* user-space prio value */
+	bool has_cap_nice;
+	unsigned int policy = desired.sched_policy;

-	size = ALIGN(data_size, sizeof(void *)) +
-		ALIGN(offsets_size, sizeof(void *));
+	if (task->policy == policy && task->normal_prio == desired.prio)
+		return;

-	if (size < data_size || size < offsets_size) {
-		binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
-				proc->pid, data_size, offsets_size);
-		return NULL;
-	}
+	has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);

-	if (is_async &&
-	    proc->free_async_space < size + sizeof(struct binder_buffer)) {
-		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
-			      proc->pid, size);
-		return NULL;
-	}
+	priority = to_userspace_prio(policy, desired.prio);

-	while (n) {
-		buffer = rb_entry(n, struct binder_buffer, rb_node);
-		BUG_ON(!buffer->free);
-		buffer_size = binder_buffer_size(proc, buffer);
+	if (verify && is_rt_policy(policy) && !has_cap_nice) {
+		long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);

-		if (size < buffer_size) {
-			best_fit = n;
-			n = n->rb_left;
-		} else if (size > buffer_size)
-			n = n->rb_right;
-		else {
-			best_fit = n;
-			break;
+		if (max_rtprio == 0) {
+			policy = SCHED_NORMAL;
+			priority = MIN_NICE;
+		} else if (priority > max_rtprio) {
+			priority = max_rtprio;
		}
	}
-	if (best_fit == NULL) {
-		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
-			proc->pid, size);
-		return NULL;
-	}
-	if (n == NULL) {
-		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
-		buffer_size = binder_buffer_size(proc, buffer);
-	}

-	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-		     "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
-		      proc->pid, size, buffer, buffer_size);
+	if (verify && is_fair_policy(policy) && !has_cap_nice) {
+		long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));

-	has_page_addr =
-		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
-	if (n == NULL) {
-		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
-			buffer_size = size; /* no room for other buffers */
-		else
-			buffer_size = size + sizeof(struct binder_buffer);
+		if (min_nice > MAX_NICE) {
+			binder_user_error("%d RLIMIT_NICE not set\n",
+					  task->pid);
+			return;
+		} else if (priority < min_nice) {
+			priority = min_nice;
+		}
	}
-	end_page_addr =
-		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
-	if (end_page_addr > has_page_addr)
-		end_page_addr = has_page_addr;
-	if (binder_update_page_range(proc, 1,
-	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
-		return NULL;

-	rb_erase(best_fit, &proc->free_buffers);
-	buffer->free = 0;
-	binder_insert_allocated_buffer(proc, buffer);
-	if (buffer_size != size) {
-		struct binder_buffer *new_buffer = (void *)buffer->data + size;
+	if (policy != desired.sched_policy ||
+	    to_kernel_prio(policy, priority) != desired.prio)
+		binder_debug(BINDER_DEBUG_PRIORITY_CAP,
+			     "%d: priority %d not allowed, using %d instead\n",
+			      task->pid, desired.prio,
+			      to_kernel_prio(policy, priority));

-		list_add(&new_buffer->entry, &buffer->entry);
-		new_buffer->free = 1;
-		binder_insert_free_buffer(proc, new_buffer);
-	}
-	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-		     "%d: binder_alloc_buf size %zd got %p\n",
-		      proc->pid, size, buffer);
-	buffer->data_size = data_size;
-	buffer->offsets_size = offsets_size;
-	buffer->async_transaction = is_async;
-	if (is_async) {
-		proc->free_async_space -= size + sizeof(struct binder_buffer);
-		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
-			     "%d: binder_alloc_buf size %zd async free %zd\n",
-			      proc->pid, size, proc->free_async_space);
-	}
+	/* Set the actual priority */
+	if (task->policy != policy || is_rt_policy(policy)) {
+		struct sched_param params;

-	return buffer;
-}
+		params.sched_priority = is_rt_policy(policy) ? priority : 0;

-static void *buffer_start_page(struct binder_buffer *buffer)
-{
-	return (void *)((uintptr_t)buffer & PAGE_MASK);
+		sched_setscheduler_nocheck(task,
+					   policy | SCHED_RESET_ON_FORK,
+					   &params);
+	}
+	if (is_fair_policy(policy))
+		set_user_nice(task, priority);
}

-static void *buffer_end_page(struct binder_buffer *buffer)
+static void binder_set_priority(struct task_struct *task,
+				struct binder_priority desired)
{
-	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
+	binder_do_set_priority(task, desired, /* verify = */ true);
}

-static void binder_delete_free_buffer(struct binder_proc *proc,
-				      struct binder_buffer *buffer)
+static void binder_restore_priority(struct task_struct *task,
+				    struct binder_priority desired)
{
-	struct binder_buffer *prev, *next = NULL;
-	int free_page_end = 1;
-	int free_page_start = 1;
-
-	BUG_ON(proc->buffers.next == &buffer->entry);
-	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
-	BUG_ON(!prev->free);
-	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
-		free_page_start = 0;
-		if (buffer_end_page(prev) == buffer_end_page(buffer))
-			free_page_end = 0;
-		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			     "%d: merge free, buffer %p share page with %p\n",
-			      proc->pid, buffer, prev);
-	}
-
-	if (!list_is_last(&buffer->entry, &proc->buffers)) {
-		next = list_entry(buffer->entry.next,
-				  struct binder_buffer, entry);
-		if (buffer_start_page(next) == buffer_end_page(buffer)) {
-			free_page_end = 0;
-			if (buffer_start_page(next) ==
-			    buffer_start_page(buffer))
-				free_page_start = 0;
-			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-				     "%d: merge free, buffer %p share page with %p\n",
-				      proc->pid, buffer, prev);
-		}
-	}
-	list_del(&buffer->entry);
-	if (free_page_start || free_page_end) {
-		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			     "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
-			     proc->pid, buffer, free_page_start ? "" : " end",
-			     free_page_end ? "" : " start", prev, next);
-		binder_update_page_range(proc, 0, free_page_start ?
-			buffer_start_page(buffer) : buffer_end_page(buffer),
-			(free_page_end ? buffer_end_page(buffer) :
-			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
-	}
+	binder_do_set_priority(task, desired, /* verify = */ false);
}
|
|
|
|
-static void binder_free_buf(struct binder_proc *proc,
|
|
- struct binder_buffer *buffer)
|
|
+static void binder_transaction_priority(struct task_struct *task,
|
|
+ struct binder_transaction *t,
|
|
+ struct binder_priority node_prio,
|
|
+ bool inherit_rt)
|
|
{
|
|
- size_t size, buffer_size;
|
|
-
|
|
- buffer_size = binder_buffer_size(proc, buffer);
|
|
-
|
|
- size = ALIGN(buffer->data_size, sizeof(void *)) +
|
|
- ALIGN(buffer->offsets_size, sizeof(void *));
|
|
-
|
|
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
|
|
- "%d: binder_free_buf %p size %zd buffer_size %zd\n",
|
|
- proc->pid, buffer, size, buffer_size);
|
|
+ struct binder_priority desired_prio;
|
|
|
|
- BUG_ON(buffer->free);
|
|
- BUG_ON(size > buffer_size);
|
|
- BUG_ON(buffer->transaction != NULL);
|
|
- BUG_ON((void *)buffer < proc->buffer);
|
|
- BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
|
|
+ if (t->set_priority_called)
|
|
+ return;
|
|
|
|
- if (buffer->async_transaction) {
|
|
- proc->free_async_space += size + sizeof(struct binder_buffer);
|
|
+ t->set_priority_called = true;
|
|
+ t->saved_priority.sched_policy = task->policy;
|
|
+ t->saved_priority.prio = task->normal_prio;
|
|
|
|
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
|
|
- "%d: binder_free_buf size %zd async free %zd\n",
|
|
- proc->pid, size, proc->free_async_space);
|
|
+ if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
|
|
+ desired_prio.prio = NICE_TO_PRIO(0);
|
|
+ desired_prio.sched_policy = SCHED_NORMAL;
|
|
+ } else {
|
|
+ desired_prio.prio = t->priority.prio;
|
|
+ desired_prio.sched_policy = t->priority.sched_policy;
|
|
}
|
|
|
|
- binder_update_page_range(proc, 0,
|
|
- (void *)PAGE_ALIGN((uintptr_t)buffer->data),
|
|
- (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
|
|
- NULL);
|
|
- rb_erase(&buffer->rb_node, &proc->allocated_buffers);
|
|
- buffer->free = 1;
|
|
- if (!list_is_last(&buffer->entry, &proc->buffers)) {
|
|
- struct binder_buffer *next = list_entry(buffer->entry.next,
|
|
- struct binder_buffer, entry);
|
|
-
|
|
- if (next->free) {
|
|
- rb_erase(&next->rb_node, &proc->free_buffers);
|
|
- binder_delete_free_buffer(proc, next);
- }
+ if (node_prio.prio < t->priority.prio ||
+ (node_prio.prio == t->priority.prio &&
+ node_prio.sched_policy == SCHED_FIFO)) {
+ /*
+ * In case the minimum priority on the node is
+ * higher (lower value), use that priority. If
+ * the priority is the same, but the node uses
+ * SCHED_FIFO, prefer SCHED_FIFO, since it can
+ * run unbounded, unlike SCHED_RR.
+ */
+ desired_prio = node_prio;
}
- if (proc->buffers.next != &buffer->entry) {
- struct binder_buffer *prev = list_entry(buffer->entry.prev,
- struct binder_buffer, entry);

- if (prev->free) {
- binder_delete_free_buffer(proc, buffer);
- rb_erase(&prev->rb_node, &proc->free_buffers);
- buffer = prev;
- }
- }
- binder_insert_free_buffer(proc, buffer);
+ binder_set_priority(task, desired_prio);
}

-static struct binder_node *binder_get_node(struct binder_proc *proc,
- binder_uintptr_t ptr)
+static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
+ binder_uintptr_t ptr)
{
struct rb_node *n = proc->nodes.rb_node;
struct binder_node *node;

+ assert_spin_locked(&proc->inner_lock);
+
while (n) {
node = rb_entry(n, struct binder_node, rb_node);

@@ -876,21 +1244,47 @@ static struct binder_node *binder_get_no
n = n->rb_left;
else if (ptr > node->ptr)
n = n->rb_right;
- else
+ else {
+ /*
+ * take an implicit weak reference
+ * to ensure node stays alive until
+ * call to binder_put_node()
+ */
+ binder_inc_node_tmpref_ilocked(node);
return node;
+ }
}
return NULL;
}

-static struct binder_node *binder_new_node(struct binder_proc *proc,
- binder_uintptr_t ptr,
- binder_uintptr_t cookie)
+static struct binder_node *binder_get_node(struct binder_proc *proc,
+ binder_uintptr_t ptr)
+{
+ struct binder_node *node;
+
+ binder_inner_proc_lock(proc);
+ node = binder_get_node_ilocked(proc, ptr);
+ binder_inner_proc_unlock(proc);
+ return node;
+}
+
+static struct binder_node *binder_init_node_ilocked(
+ struct binder_proc *proc,
+ struct binder_node *new_node,
+ struct flat_binder_object *fp)
{
struct rb_node **p = &proc->nodes.rb_node;
struct rb_node *parent = NULL;
struct binder_node *node;
+ binder_uintptr_t ptr = fp ? fp->binder : 0;
+ binder_uintptr_t cookie = fp ? fp->cookie : 0;
+ __u32 flags = fp ? fp->flags : 0;
+ s8 priority;
+
+ assert_spin_locked(&proc->inner_lock);

while (*p) {
+
parent = *p;
node = rb_entry(parent, struct binder_node, rb_node);

@@ -898,39 +1292,86 @@ static struct binder_node *binder_new_no
p = &(*p)->rb_left;
else if (ptr > node->ptr)
p = &(*p)->rb_right;
- else
- return NULL;
+ else {
+ /*
+ * A matching node is already in
+ * the rb tree. Abandon the init
+ * and return it.
+ */
+ binder_inc_node_tmpref_ilocked(node);
+ return node;
+ }
}
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (node == NULL)
- return NULL;
+ node = new_node;
binder_stats_created(BINDER_STAT_NODE);
+ node->tmp_refs++;
rb_link_node(&node->rb_node, parent, p);
rb_insert_color(&node->rb_node, &proc->nodes);
- node->debug_id = ++binder_last_id;
+ node->debug_id = atomic_inc_return(&binder_last_id);
node->proc = proc;
node->ptr = ptr;
node->cookie = cookie;
node->work.type = BINDER_WORK_NODE;
+ priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+ node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
+ FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
+ node->min_priority = to_kernel_prio(node->sched_policy, priority);
|
|
+ node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
|
|
+ node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
|
|
+ spin_lock_init(&node->lock);
|
|
INIT_LIST_HEAD(&node->work.entry);
|
|
INIT_LIST_HEAD(&node->async_todo);
|
|
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
|
|
"%d:%d node %d u%016llx c%016llx created\n",
|
|
proc->pid, current->pid, node->debug_id,
|
|
(u64)node->ptr, (u64)node->cookie);
|
|
+
|
|
return node;
|
|
}
|
|
|
|
-static int binder_inc_node(struct binder_node *node, int strong, int internal,
|
|
- struct list_head *target_list)
|
|
+static struct binder_node *binder_new_node(struct binder_proc *proc,
|
|
+ struct flat_binder_object *fp)
|
|
{
|
|
+ struct binder_node *node;
|
|
+ struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
|
|
+
|
|
+ if (!new_node)
|
|
+ return NULL;
|
|
+ binder_inner_proc_lock(proc);
|
|
+ node = binder_init_node_ilocked(proc, new_node, fp);
|
|
+ binder_inner_proc_unlock(proc);
|
|
+ if (node != new_node)
|
|
+ /*
|
|
+ * The node was already added by another thread
|
|
+ */
|
|
+ kfree(new_node);
|
|
+
|
|
+ return node;
|
|
+}
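binder_new_node() allocates with GFP_KERNEL before taking the inner spinlock, then lets binder_init_node_ilocked() either link the new node or hand back an existing one, in which case the caller frees the loser. A minimal userspace sketch of the same insert-or-get pattern, with a pthread mutex standing in for the spinlock (struct item, insert_or_get and the other names here are illustrative, not part of the driver):

    #include <pthread.h>
    #include <stdlib.h>

    struct item { int key; struct item *next; };

    static struct item *head;
    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Insert new_item unless an item with the same key already exists;
     * return whichever item ends up in the list. */
    static struct item *insert_or_get(struct item *new_item)
    {
        struct item *it;

        pthread_mutex_lock(&list_lock);
        for (it = head; it; it = it->next)
            if (it->key == new_item->key)
                goto out;           /* lost the race, keep existing */
        new_item->next = head;
        head = new_item;
        it = new_item;
    out:
        pthread_mutex_unlock(&list_lock);
        return it;
    }

    static struct item *new_item_for(int key)
    {
        struct item *n = calloc(1, sizeof(*n)); /* allocate outside the lock */
        struct item *it;

        if (!n)
            return NULL;
        n->key = key;
        it = insert_or_get(n);
        if (it != n)
            free(n);                /* another thread added it first */
        return it;
    }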
+
+static void binder_free_node(struct binder_node *node)
+{
+ kfree(node);
+ binder_stats_deleted(BINDER_STAT_NODE);
+}
+
+static int binder_inc_node_nilocked(struct binder_node *node, int strong,
+ int internal,
+ struct list_head *target_list)
+{
+ struct binder_proc *proc = node->proc;
+
+ assert_spin_locked(&node->lock);
+ if (proc)
+ assert_spin_locked(&proc->inner_lock);
if (strong) {
if (internal) {
if (target_list == NULL &&
node->internal_strong_refs == 0 &&
- !(node == binder_context_mgr_node &&
- node->has_strong_ref)) {
+ !(node->proc &&
+ node == node->proc->context->
+ binder_context_mgr_node &&
+ node->has_strong_ref)) {
pr_err("invalid inc strong node for %d\n",
node->debug_id);
return -EINVAL;
@@ -939,8 +1380,8 @@ static int binder_inc_node(struct binder
} else
node->local_strong_refs++;
if (!node->has_strong_ref && target_list) {
- list_del_init(&node->work.entry);
- list_add_tail(&node->work.entry, target_list);
+ binder_dequeue_work_ilocked(&node->work);
+ binder_enqueue_work_ilocked(&node->work, target_list);
}
} else {
if (!internal)
@@ -951,58 +1392,169 @@ static int binder_inc_node(struct binder
node->debug_id);
return -EINVAL;
}
- list_add_tail(&node->work.entry, target_list);
+ binder_enqueue_work_ilocked(&node->work, target_list);
}
}
return 0;
}

-static int binder_dec_node(struct binder_node *node, int strong, int internal)
+static int binder_inc_node(struct binder_node *node, int strong, int internal,
+ struct list_head *target_list)
+{
+ int ret;
+
+ binder_node_inner_lock(node);
+ ret = binder_inc_node_nilocked(node, strong, internal, target_list);
+ binder_node_inner_unlock(node);
+
+ return ret;
+}
+
+static bool binder_dec_node_nilocked(struct binder_node *node,
+ int strong, int internal)
{
+ struct binder_proc *proc = node->proc;
+
+ assert_spin_locked(&node->lock);
+ if (proc)
+ assert_spin_locked(&proc->inner_lock);
if (strong) {
if (internal)
node->internal_strong_refs--;
else
node->local_strong_refs--;
if (node->local_strong_refs || node->internal_strong_refs)
- return 0;
+ return false;
} else {
if (!internal)
node->local_weak_refs--;
- if (node->local_weak_refs || !hlist_empty(&node->refs))
- return 0;
+ if (node->local_weak_refs || node->tmp_refs ||
+ !hlist_empty(&node->refs))
+ return false;
}
- if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
+
+ if (proc && (node->has_strong_ref || node->has_weak_ref)) {
if (list_empty(&node->work.entry)) {
- list_add_tail(&node->work.entry, &node->proc->todo);
- wake_up_interruptible(&node->proc->wait);
+ binder_enqueue_work_ilocked(&node->work, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
}
} else {
if (hlist_empty(&node->refs) && !node->local_strong_refs &&
- !node->local_weak_refs) {
- list_del_init(&node->work.entry);
- if (node->proc) {
- rb_erase(&node->rb_node, &node->proc->nodes);
+ !node->local_weak_refs && !node->tmp_refs) {
+ if (proc) {
+ binder_dequeue_work_ilocked(&node->work);
+ rb_erase(&node->rb_node, &proc->nodes);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"refless node %d deleted\n",
node->debug_id);
} else {
+ BUG_ON(!list_empty(&node->work.entry));
+ spin_lock(&binder_dead_nodes_lock);
+ /*
+ * tmp_refs could have changed so
+ * check it again
+ */
+ if (node->tmp_refs) {
+ spin_unlock(&binder_dead_nodes_lock);
+ return false;
+ }
hlist_del(&node->dead_node);
+ spin_unlock(&binder_dead_nodes_lock);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"dead node %d deleted\n",
node->debug_id);
}
- kfree(node);
- binder_stats_deleted(BINDER_STAT_NODE);
+ return true;
}
}
+ return false;
+}

- return 0;
+static void binder_dec_node(struct binder_node *node, int strong, int internal)
+{
+ bool free_node;
+
+ binder_node_inner_lock(node);
+ free_node = binder_dec_node_nilocked(node, strong, internal);
+ binder_node_inner_unlock(node);
+ if (free_node)
+ binder_free_node(node);
}

+static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
+{
+ /*
+ * No call to binder_inc_node() is needed since we
+ * don't need to inform userspace of any changes to
+ * tmp_refs
+ */
+ node->tmp_refs++;
+}

-static struct binder_ref *binder_get_ref(struct binder_proc *proc,
- u32 desc, bool need_strong_ref)
+/**
+ * binder_inc_node_tmpref() - take a temporary reference on node
+ * @node: node to reference
+ *
+ * Take reference on node to prevent the node from being freed
+ * while referenced only by a local variable. The inner lock is
+ * needed to serialize with the node work on the queue (which
+ * isn't needed after the node is dead). If the node is dead
+ * (node->proc is NULL), use binder_dead_nodes_lock to protect
+ * node->tmp_refs against dead-node-only cases where the node
+ * lock cannot be acquired (eg traversing the dead node list to
+ * print nodes)
+ */
+static void binder_inc_node_tmpref(struct binder_node *node)
+{
+ binder_node_lock(node);
+ if (node->proc)
+ binder_inner_proc_lock(node->proc);
+ else
+ spin_lock(&binder_dead_nodes_lock);
+ binder_inc_node_tmpref_ilocked(node);
+ if (node->proc)
+ binder_inner_proc_unlock(node->proc);
+ else
+ spin_unlock(&binder_dead_nodes_lock);
+ binder_node_unlock(node);
+}
+
+/**
+ * binder_dec_node_tmpref() - remove a temporary reference on node
+ * @node: node to reference
+ *
+ * Release temporary reference on node taken via binder_inc_node_tmpref()
+ */
+static void binder_dec_node_tmpref(struct binder_node *node)
+{
+ bool free_node;
+
+ binder_node_inner_lock(node);
+ if (!node->proc)
+ spin_lock(&binder_dead_nodes_lock);
+ node->tmp_refs--;
+ BUG_ON(node->tmp_refs < 0);
+ if (!node->proc)
+ spin_unlock(&binder_dead_nodes_lock);
+ /*
+ * Call binder_dec_node() to check if all refcounts are 0
+ * and cleanup is needed. Calling with strong=0 and internal=1
+ * causes no actual reference to be released in binder_dec_node().
+ * If that changes, a change is needed here too.
+ */
+ free_node = binder_dec_node_nilocked(node, 0, 1);
+ binder_node_inner_unlock(node);
+ if (free_node)
+ binder_free_node(node);
+}
+
+static void binder_put_node(struct binder_node *node)
+{
+ binder_dec_node_tmpref(node);
+}
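Every lookup that returns a node now implicitly takes a temporary reference, so callers follow a strict get/put bracket. A hedged sketch of the expected calling pattern, using only functions introduced by this patch:

    struct binder_node *node;

    node = binder_get_node(proc, ptr);  /* implicitly does tmp_refs++ */
    if (node) {
        /* node cannot be freed here, even if all strong and
         * weak references are dropped concurrently
         */
        binder_put_node(node);          /* tmp_refs--, may free node */
    }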
+
+static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
+ u32 desc, bool need_strong_ref)
{
struct rb_node *n = proc->refs_by_desc.rb_node;
struct binder_ref *ref;
@@ -1010,11 +1562,11 @@ static struct binder_ref *binder_get_ref
while (n) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);

- if (desc < ref->desc) {
+ if (desc < ref->data.desc) {
n = n->rb_left;
- } else if (desc > ref->desc) {
+ } else if (desc > ref->data.desc) {
n = n->rb_right;
- } else if (need_strong_ref && !ref->strong) {
+ } else if (need_strong_ref && !ref->data.strong) {
binder_user_error("tried to use weak ref as strong ref\n");
return NULL;
} else {
@@ -1024,13 +1576,34 @@ static struct binder_ref *binder_get_ref
return NULL;
}

-static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
- struct binder_node *node)
+/**
+ * binder_get_ref_for_node_olocked() - get the ref associated with given node
+ * @proc: binder_proc that owns the ref
+ * @node: binder_node of target
+ * @new_ref: newly allocated binder_ref to be initialized or %NULL
+ *
+ * Look up the ref for the given node and return it if it exists
+ *
+ * If it doesn't exist and the caller provides a newly allocated
+ * ref, initialize the fields of the newly allocated ref and insert
+ * into the given proc rb_trees and node refs list.
+ *
+ * Return: the ref for node. It is possible that another thread
+ * allocated/initialized the ref first in which case the
+ * returned ref would be different than the passed-in
+ * new_ref. new_ref must be kfree'd by the caller in
+ * this case.
+ */
+static struct binder_ref *binder_get_ref_for_node_olocked(
+ struct binder_proc *proc,
+ struct binder_node *node,
+ struct binder_ref *new_ref)
{
- struct rb_node *n;
+ struct binder_context *context = proc->context;
struct rb_node **p = &proc->refs_by_node.rb_node;
struct rb_node *parent = NULL;
- struct binder_ref *ref, *new_ref;
+ struct binder_ref *ref;
+ struct rb_node *n;

while (*p) {
parent = *p;
@@ -1043,22 +1616,22 @@ static struct binder_ref *binder_get_ref
else
return ref;
}
- new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
- if (new_ref == NULL)
+ if (!new_ref)
return NULL;
+
binder_stats_created(BINDER_STAT_REF);
- new_ref->debug_id = ++binder_last_id;
+ new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
new_ref->proc = proc;
new_ref->node = node;
rb_link_node(&new_ref->rb_node_node, parent, p);
rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

- new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
+ new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
- if (ref->desc > new_ref->desc)
+ if (ref->data.desc > new_ref->data.desc)
break;
- new_ref->desc = ref->desc + 1;
+ new_ref->data.desc = ref->data.desc + 1;
}

p = &proc->refs_by_desc.rb_node;
@@ -1066,121 +1639,423 @@ static struct binder_ref *binder_get_ref
parent = *p;
ref = rb_entry(parent, struct binder_ref, rb_node_desc);

- if (new_ref->desc < ref->desc)
+ if (new_ref->data.desc < ref->data.desc)
p = &(*p)->rb_left;
- else if (new_ref->desc > ref->desc)
+ else if (new_ref->data.desc > ref->data.desc)
p = &(*p)->rb_right;
else
BUG();
}
rb_link_node(&new_ref->rb_node_desc, parent, p);
rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
- if (node) {
- hlist_add_head(&new_ref->node_entry, &node->refs);

- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d new ref %d desc %d for node %d\n",
- proc->pid, new_ref->debug_id, new_ref->desc,
- node->debug_id);
- } else {
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d new ref %d desc %d for dead node\n",
- proc->pid, new_ref->debug_id, new_ref->desc);
- }
+ binder_node_lock(node);
+ hlist_add_head(&new_ref->node_entry, &node->refs);
+
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "%d new ref %d desc %d for node %d\n",
+ proc->pid, new_ref->data.debug_id, new_ref->data.desc,
+ node->debug_id);
+ binder_node_unlock(node);
return new_ref;
}

-static void binder_delete_ref(struct binder_ref *ref)
+static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
+ bool delete_node = false;
+
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"%d delete ref %d desc %d for node %d\n",
- ref->proc->pid, ref->debug_id, ref->desc,
+ ref->proc->pid, ref->data.debug_id, ref->data.desc,
ref->node->debug_id);

rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
- if (ref->strong)
- binder_dec_node(ref->node, 1, 1);
+
+ binder_node_inner_lock(ref->node);
+ if (ref->data.strong)
+ binder_dec_node_nilocked(ref->node, 1, 1);
+
hlist_del(&ref->node_entry);
- binder_dec_node(ref->node, 0, 1);
+ delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
+ binder_node_inner_unlock(ref->node);
+ /*
+ * Clear ref->node unless we want the caller to free the node
+ */
+ if (!delete_node) {
+ /*
+ * The caller uses ref->node to determine
+ * whether the node needs to be freed. Clear
+ * it since the node is still alive.
+ */
+ ref->node = NULL;
+ }
+
if (ref->death) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"%d delete ref %d desc %d has death notification\n",
- ref->proc->pid, ref->debug_id, ref->desc);
- list_del(&ref->death->work.entry);
- kfree(ref->death);
+ ref->proc->pid, ref->data.debug_id,
+ ref->data.desc);
+ binder_dequeue_work(ref->proc, &ref->death->work);
binder_stats_deleted(BINDER_STAT_DEATH);
}
- kfree(ref);
binder_stats_deleted(BINDER_STAT_REF);
}

-static int binder_inc_ref(struct binder_ref *ref, int strong,
- struct list_head *target_list)
+/**
+ * binder_inc_ref_olocked() - increment the ref for given handle
+ * @ref: ref to be incremented
+ * @strong: if true, strong increment, else weak
+ * @target_list: list to queue node work on
+ *
+ * Increment the ref. @ref->proc->outer_lock must be held on entry
+ *
+ * Return: 0, if successful, else errno
+ */
+static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
+ struct list_head *target_list)
{
int ret;

if (strong) {
- if (ref->strong == 0) {
+ if (ref->data.strong == 0) {
ret = binder_inc_node(ref->node, 1, 1, target_list);
if (ret)
return ret;
}
- ref->strong++;
+ ref->data.strong++;
} else {
- if (ref->weak == 0) {
+ if (ref->data.weak == 0) {
ret = binder_inc_node(ref->node, 0, 1, target_list);
if (ret)
return ret;
}
- ref->weak++;
+ ref->data.weak++;
}
return 0;
}

-
-static int binder_dec_ref(struct binder_ref *ref, int strong)
+/**
+ * binder_dec_ref_olocked() - dec the ref for given handle
+ * @ref: ref to be decremented
+ * @strong: if true, strong decrement, else weak
+ *
+ * Decrement the ref.
+ *
+ * Return: true if ref is cleaned up and ready to be freed
+ */
+static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
if (strong) {
- if (ref->strong == 0) {
+ if (ref->data.strong == 0) {
binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
- ref->proc->pid, ref->debug_id,
- ref->desc, ref->strong, ref->weak);
- return -EINVAL;
- }
- ref->strong--;
- if (ref->strong == 0) {
- int ret;
-
- ret = binder_dec_node(ref->node, strong, 1);
- if (ret)
- return ret;
+ ref->proc->pid, ref->data.debug_id,
+ ref->data.desc, ref->data.strong,
+ ref->data.weak);
+ return false;
}
+ ref->data.strong--;
+ if (ref->data.strong == 0)
+ binder_dec_node(ref->node, strong, 1);
} else {
- if (ref->weak == 0) {
+ if (ref->data.weak == 0) {
binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
- ref->proc->pid, ref->debug_id,
- ref->desc, ref->strong, ref->weak);
- return -EINVAL;
+ ref->proc->pid, ref->data.debug_id,
+ ref->data.desc, ref->data.strong,
+ ref->data.weak);
+ return false;
}
- ref->weak--;
+ ref->data.weak--;
}
- if (ref->strong == 0 && ref->weak == 0)
- binder_delete_ref(ref);
- return 0;
+ if (ref->data.strong == 0 && ref->data.weak == 0) {
+ binder_cleanup_ref_olocked(ref);
+ return true;
+ }
+ return false;
}

-static void binder_pop_transaction(struct binder_thread *target_thread,
- struct binder_transaction *t)
+/**
+ * binder_get_node_from_ref() - get the node from the given proc/desc
+ * @proc: proc containing the ref
+ * @desc: the handle associated with the ref
+ * @need_strong_ref: if true, only return node if ref is strong
+ * @rdata: the id/refcount data for the ref
+ *
+ * Given a proc and ref handle, return the associated binder_node
+ *
+ * Return: a binder_node or NULL if not found or not strong when strong required
+ */
+static struct binder_node *binder_get_node_from_ref(
+ struct binder_proc *proc,
+ u32 desc, bool need_strong_ref,
+ struct binder_ref_data *rdata)
{
- if (target_thread) {
- BUG_ON(target_thread->transaction_stack != t);
- BUG_ON(target_thread->transaction_stack->from != target_thread);
- target_thread->transaction_stack =
- target_thread->transaction_stack->from_parent;
- t->from = NULL;
+ struct binder_node *node;
+ struct binder_ref *ref;
+
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
+ if (!ref)
+ goto err_no_ref;
+ node = ref->node;
+ /*
+ * Take an implicit reference on the node to ensure
+ * it stays alive until the call to binder_put_node()
+ */
+ binder_inc_node_tmpref(node);
+ if (rdata)
+ *rdata = ref->data;
+ binder_proc_unlock(proc);
+
+ return node;
+
+err_no_ref:
+ binder_proc_unlock(proc);
+ return NULL;
+}
+
+/**
+ * binder_free_ref() - free the binder_ref
+ * @ref: ref to free
+ *
+ * Free the binder_ref. Free the binder_node indicated by ref->node
+ * (if non-NULL) and the binder_ref_death indicated by ref->death.
+ */
+static void binder_free_ref(struct binder_ref *ref)
+{
+ if (ref->node)
+ binder_free_node(ref->node);
+ kfree(ref->death);
+ kfree(ref);
+}
+
+/**
+ * binder_update_ref_for_handle() - inc/dec the ref for given handle
+ * @proc: proc containing the ref
+ * @desc: the handle associated with the ref
+ * @increment: true=inc reference, false=dec reference
+ * @strong: true=strong reference, false=weak reference
+ * @rdata: the id/refcount data for the ref
+ *
+ * Given a proc and ref handle, increment or decrement the ref
+ * according to "increment" arg.
+ *
+ * Return: 0 if successful, else errno
+ */
+static int binder_update_ref_for_handle(struct binder_proc *proc,
+ uint32_t desc, bool increment, bool strong,
+ struct binder_ref_data *rdata)
+{
+ int ret = 0;
+ struct binder_ref *ref;
+ bool delete_ref = false;
+
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, desc, strong);
+ if (!ref) {
+ ret = -EINVAL;
+ goto err_no_ref;
}
- t->need_reply = 0;
+ if (increment)
+ ret = binder_inc_ref_olocked(ref, strong, NULL);
+ else
+ delete_ref = binder_dec_ref_olocked(ref, strong);
+
+ if (rdata)
+ *rdata = ref->data;
+ binder_proc_unlock(proc);
+
+ if (delete_ref)
+ binder_free_ref(ref);
+ return ret;
+
+err_no_ref:
+ binder_proc_unlock(proc);
+ return ret;
+}
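binder_update_ref_for_handle() records the cleanup decision in delete_ref while the proc lock is held, but calls binder_free_ref() only after dropping it, since the free can cascade into node and death-notification teardown. A condensed sketch of that shape (obj, obj_lock and unlink_obj are illustrative placeholders, not driver symbols):

    bool dead = false;

    spin_lock(&obj_lock);
    if (--obj->refs == 0) {
        unlink_obj(obj);    /* make it unreachable while still locked */
        dead = true;
    }
    spin_unlock(&obj_lock);
    if (dead)
        kfree(obj);         /* heavier cleanup outside the lock */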
+
+/**
+ * binder_dec_ref_for_handle() - dec the ref for given handle
+ * @proc: proc containing the ref
+ * @desc: the handle associated with the ref
+ * @strong: true=strong reference, false=weak reference
+ * @rdata: the id/refcount data for the ref
+ *
+ * Just calls binder_update_ref_for_handle() to decrement the ref.
+ *
+ * Return: 0 if successful, else errno
+ */
+static int binder_dec_ref_for_handle(struct binder_proc *proc,
+ uint32_t desc, bool strong, struct binder_ref_data *rdata)
+{
+ return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
+}
+
+
+/**
+ * binder_inc_ref_for_node() - increment the ref for given proc/node
+ * @proc: proc containing the ref
+ * @node: target node
+ * @strong: true=strong reference, false=weak reference
+ * @target_list: worklist to use if node is incremented
+ * @rdata: the id/refcount data for the ref
+ *
+ * Given a proc and node, increment the ref. Create the ref if it
+ * doesn't already exist
+ *
+ * Return: 0 if successful, else errno
+ */
+static int binder_inc_ref_for_node(struct binder_proc *proc,
+ struct binder_node *node,
+ bool strong,
+ struct list_head *target_list,
+ struct binder_ref_data *rdata)
+{
+ struct binder_ref *ref;
+ struct binder_ref *new_ref = NULL;
+ int ret = 0;
+
+ binder_proc_lock(proc);
+ ref = binder_get_ref_for_node_olocked(proc, node, NULL);
+ if (!ref) {
+ binder_proc_unlock(proc);
+ new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!new_ref)
+ return -ENOMEM;
+ binder_proc_lock(proc);
+ ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
+ }
+ ret = binder_inc_ref_olocked(ref, strong, target_list);
+ *rdata = ref->data;
+ binder_proc_unlock(proc);
+ if (new_ref && ref != new_ref)
+ /*
+ * Another thread created the ref first so
+ * free the one we allocated
+ */
+ kfree(new_ref);
+ return ret;
+}
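Because GFP_KERNEL allocations may sleep, binder_inc_ref_for_node() cannot call kzalloc() while holding the proc spinlock; it drops the lock, allocates, retakes the lock, and repeats the lookup, since another thread may have created the ref in the window. Schematically (lookup and lookup_or_insert stand in for the two binder_get_ref_for_node_olocked() calls):

    lock();
    ref = lookup();
    if (!ref) {
        unlock();                   /* kzalloc(GFP_KERNEL) may sleep */
        new_ref = kzalloc(...);
        lock();
        ref = lookup_or_insert(new_ref);    /* re-check under the lock */
    }
    ...
    unlock();
    if (new_ref && ref != new_ref)
        kfree(new_ref);             /* lost the race */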
+
+static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
+ struct binder_transaction *t)
+{
+ BUG_ON(!target_thread);
+ assert_spin_locked(&target_thread->proc->inner_lock);
+ BUG_ON(target_thread->transaction_stack != t);
+ BUG_ON(target_thread->transaction_stack->from != target_thread);
+ target_thread->transaction_stack =
+ target_thread->transaction_stack->from_parent;
+ t->from = NULL;
+}
+
+/**
+ * binder_thread_dec_tmpref() - decrement thread->tmp_ref
+ * @thread: thread to decrement
+ *
+ * A thread needs to be kept alive while being used to create or
+ * handle a transaction. binder_get_txn_from() is used to safely
+ * extract t->from from a binder_transaction and keep the thread
+ * indicated by t->from from being freed. When done with that
+ * binder_thread, this function is called to decrement the
+ * tmp_ref and free if appropriate (thread has been released
+ * and no transaction being processed by the driver)
+ */
+static void binder_thread_dec_tmpref(struct binder_thread *thread)
+{
+ /*
+ * atomic is used to protect the counter value while
+ * it cannot reach zero or thread->is_dead is false
+ */
+ binder_inner_proc_lock(thread->proc);
+ atomic_dec(&thread->tmp_ref);
+ if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
+ binder_inner_proc_unlock(thread->proc);
+ binder_free_thread(thread);
+ return;
+ }
+ binder_inner_proc_unlock(thread->proc);
+}
+
+/**
+ * binder_proc_dec_tmpref() - decrement proc->tmp_ref
+ * @proc: proc to decrement
+ *
+ * A binder_proc needs to be kept alive while being used to create or
+ * handle a transaction. proc->tmp_ref is incremented when
+ * creating a new transaction or the binder_proc is currently in-use
+ * by threads that are being released. When done with the binder_proc,
+ * this function is called to decrement the counter and free the
+ * proc if appropriate (proc has been released, all threads have
+ * been released and not currently in-use to process a transaction).
+ */
+static void binder_proc_dec_tmpref(struct binder_proc *proc)
+{
+ binder_inner_proc_lock(proc);
+ proc->tmp_ref--;
+ if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
+ !proc->tmp_ref) {
+ binder_inner_proc_unlock(proc);
+ binder_free_proc(proc);
+ return;
+ }
+ binder_inner_proc_unlock(proc);
+}
+
+/**
+ * binder_get_txn_from() - safely extract the "from" thread in transaction
+ * @t: binder transaction for t->from
+ *
+ * Atomically return the "from" thread and increment the tmp_ref
+ * count for the thread to ensure it stays alive until
+ * binder_thread_dec_tmpref() is called.
+ *
+ * Return: the value of t->from
+ */
+static struct binder_thread *binder_get_txn_from(
+ struct binder_transaction *t)
+{
+ struct binder_thread *from;
+
+ spin_lock(&t->lock);
+ from = t->from;
+ if (from)
+ atomic_inc(&from->tmp_ref);
+ spin_unlock(&t->lock);
+ return from;
+}
+
+/**
+ * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
+ * @t: binder transaction for t->from
+ *
+ * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
+ * to guarantee that the thread cannot be released while operating on it.
+ * The caller must call binder_inner_proc_unlock() to release the inner lock
+ * as well as call binder_thread_dec_tmpref() to release the reference.
+ *
+ * Return: the value of t->from
+ */
+static struct binder_thread *binder_get_txn_from_and_acq_inner(
+ struct binder_transaction *t)
+{
+ struct binder_thread *from;
+
+ from = binder_get_txn_from(t);
+ if (!from)
+ return NULL;
+ binder_inner_proc_lock(from->proc);
+ if (t->from) {
+ BUG_ON(from != t->from);
+ return from;
+ }
+ binder_inner_proc_unlock(from->proc);
+ binder_thread_dec_tmpref(from);
+ return NULL;
+}
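A caller that takes the thread through this helper must release both the lock and the reference, in that order. A sketch of the bracket (binder_send_failed_reply() below follows exactly this shape):

    target_thread = binder_get_txn_from_and_acq_inner(t);
    if (target_thread) {
        /* inner lock held: the thread cannot be released here */
        ...
        binder_inner_proc_unlock(target_thread->proc);
        binder_thread_dec_tmpref(target_thread);
    }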
+
+static void binder_free_transaction(struct binder_transaction *t)
+{
if (t->buffer)
t->buffer->transaction = NULL;
kfree(t);
@@ -1195,30 +2070,28 @@ static void binder_send_failed_reply(str

BUG_ON(t->flags & TF_ONE_WAY);
while (1) {
- target_thread = t->from;
+ target_thread = binder_get_txn_from_and_acq_inner(t);
if (target_thread) {
- if (target_thread->return_error != BR_OK &&
- target_thread->return_error2 == BR_OK) {
- target_thread->return_error2 =
- target_thread->return_error;
- target_thread->return_error = BR_OK;
- }
- if (target_thread->return_error == BR_OK) {
- binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "send failed reply for transaction %d to %d:%d\n",
- t->debug_id,
- target_thread->proc->pid,
- target_thread->pid);
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+ "send failed reply for transaction %d to %d:%d\n",
+ t->debug_id,
+ target_thread->proc->pid,
+ target_thread->pid);

- binder_pop_transaction(target_thread, t);
- target_thread->return_error = error_code;
+ binder_pop_transaction_ilocked(target_thread, t);
+ if (target_thread->reply_error.cmd == BR_OK) {
+ target_thread->reply_error.cmd = error_code;
+ binder_enqueue_work_ilocked(
+ &target_thread->reply_error.work,
+ &target_thread->todo);
wake_up_interruptible(&target_thread->wait);
} else {
- pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
- target_thread->proc->pid,
- target_thread->pid,
- target_thread->return_error);
+ WARN(1, "Unexpected reply error: %u\n",
+ target_thread->reply_error.cmd);
}
+ binder_inner_proc_unlock(target_thread->proc);
+ binder_thread_dec_tmpref(target_thread);
+ binder_free_transaction(t);
return;
}
next = t->from_parent;
@@ -1227,7 +2100,7 @@ static void binder_send_failed_reply(str
"send failed reply for transaction %d, target dead\n",
t->debug_id);

- binder_pop_transaction(target_thread, t);
+ binder_free_transaction(t);
if (next == NULL) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"reply failed, no target thread at root\n");
@@ -1240,11 +2113,158 @@ static void binder_send_failed_reply(str
}
}

+/**
+ * binder_validate_object() - checks for a valid metadata object in a buffer.
+ * @buffer: binder_buffer that we're parsing.
+ * @offset: offset in the buffer at which to validate an object.
+ *
+ * Return: If there's a valid metadata object at @offset in @buffer, the
+ * size of that object. Otherwise, it returns zero.
+ */
+static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
+{
+ /* Check if we can read a header first */
+ struct binder_object_header *hdr;
+ size_t object_size = 0;
+
+ if (offset > buffer->data_size - sizeof(*hdr) ||
+ buffer->data_size < sizeof(*hdr) ||
+ !IS_ALIGNED(offset, sizeof(u32)))
+ return 0;
+
+ /* Ok, now see if we can read a complete object. */
+ hdr = (struct binder_object_header *)(buffer->data + offset);
+ switch (hdr->type) {
+ case BINDER_TYPE_BINDER:
+ case BINDER_TYPE_WEAK_BINDER:
+ case BINDER_TYPE_HANDLE:
+ case BINDER_TYPE_WEAK_HANDLE:
+ object_size = sizeof(struct flat_binder_object);
+ break;
+ case BINDER_TYPE_FD:
+ object_size = sizeof(struct binder_fd_object);
+ break;
+ case BINDER_TYPE_PTR:
+ object_size = sizeof(struct binder_buffer_object);
+ break;
+ case BINDER_TYPE_FDA:
+ object_size = sizeof(struct binder_fd_array_object);
+ break;
+ default:
+ return 0;
+ }
+ if (offset <= buffer->data_size - object_size &&
+ buffer->data_size >= object_size)
+ return object_size;
+ else
+ return 0;
+}
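The two-sided size check at the end deliberately avoids computing offset + object_size, which could wrap for a hostile offset. A standalone, compilable illustration of the same idiom (fits() is an illustrative name, not a driver function):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* true iff [offset, offset + object_size) lies inside data_size,
     * written without the overflow-prone addition */
    static int fits(size_t data_size, uint64_t offset, size_t object_size)
    {
        return data_size >= object_size &&
               offset <= data_size - object_size;
    }

    int main(void)
    {
        assert(fits(64, 0, 24));
        assert(!fits(64, 48, 24));              /* runs off the end */
        assert(!fits(16, UINT64_MAX - 8, 24));  /* addition would wrap */
        return 0;
    }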
+
+/**
+ * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
+ * @b: binder_buffer containing the object
+ * @index: index in offset array at which the binder_buffer_object is
+ * located
+ * @start: points to the start of the offset array
+ * @num_valid: the number of valid offsets in the offset array
+ *
+ * Return: If @index is within the valid range of the offset array
+ * described by @start and @num_valid, and if there's a valid
+ * binder_buffer_object at the offset found in index @index
+ * of the offset array, that object is returned. Otherwise,
+ * %NULL is returned.
+ * Note that the offset found in index @index itself is not
+ * verified; this function assumes that @num_valid elements
+ * from @start were previously verified to have valid offsets.
+ */
+static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
+ binder_size_t index,
+ binder_size_t *start,
+ binder_size_t num_valid)
+{
+ struct binder_buffer_object *buffer_obj;
+ binder_size_t *offp;
+
+ if (index >= num_valid)
+ return NULL;
+
+ offp = start + index;
+ buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
+ if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
+ return NULL;
+
+ return buffer_obj;
+}
+
+/**
+ * binder_validate_fixup() - validates pointer/fd fixups happen in order.
+ * @b: transaction buffer
+ * @objects_start: start of objects buffer
+ * @buffer: binder_buffer_object in which to fix up
+ * @offset: start offset in @buffer to fix up
+ * @last_obj: last binder_buffer_object that we fixed up in
+ * @last_min_offset: minimum fixup offset in @last_obj
+ *
+ * Return: %true if a fixup in buffer @buffer at offset @offset is
+ * allowed.
+ *
+ * For safety reasons, we only allow fixups inside a buffer to happen
+ * at increasing offsets; additionally, we only allow fixup on the last
+ * buffer object that was verified, or one of its parents.
+ *
+ * Example of what is allowed:
+ *
+ * A
+ * B (parent = A, offset = 0)
+ * C (parent = A, offset = 16)
+ * D (parent = C, offset = 0)
+ * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
+ *
+ * Examples of what is not allowed:
+ *
+ * Decreasing offsets within the same parent:
+ * A
+ * C (parent = A, offset = 16)
+ * B (parent = A, offset = 0) // decreasing offset within A
+ *
+ * Referring to a parent that wasn't the last object or any of its parents:
+ * A
+ * B (parent = A, offset = 0)
+ * C (parent = A, offset = 0)
+ * C (parent = A, offset = 16)
+ * D (parent = B, offset = 0) // B is not A or any of A's parents
+ */
+static bool binder_validate_fixup(struct binder_buffer *b,
+ binder_size_t *objects_start,
+ struct binder_buffer_object *buffer,
+ binder_size_t fixup_offset,
+ struct binder_buffer_object *last_obj,
+ binder_size_t last_min_offset)
+{
+ if (!last_obj) {
+ /* Nothing to fix up in */
+ return false;
+ }
+
+ while (last_obj != buffer) {
+ /*
+ * Safe to retrieve the parent of last_obj, since it
+ * was already previously verified by the driver.
+ */
+ if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
+ return false;
+ last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
+ last_obj = (struct binder_buffer_object *)
+ (b->data + *(objects_start + last_obj->parent));
+ }
+ return (fixup_offset >= last_min_offset);
+}
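Tracing the allowed example above through this loop for fixup E (parent = A, offset = 32): the walk starts at last_obj = D, follows D's parent up to C (raising last_min_offset to D.parent_offset + sizeof(uintptr_t) = 8), then C's parent up to A (raising it to C.parent_offset + sizeof(uintptr_t) = 24), at which point last_obj == buffer and the final check 32 >= 24 passes. Note that the example comment rounds this down to C.parent_offset itself; the computed bound also includes sizeof(uintptr_t), assumed to be 8 in this trace.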
+
static void binder_transaction_buffer_release(struct binder_proc *proc,
struct binder_buffer *buffer,
binder_size_t *failed_at)
{
- binder_size_t *offp, *off_end;
+ binder_size_t *offp, *off_start, *off_end;
int debug_id = buffer->debug_id;

binder_debug(BINDER_DEBUG_TRANSACTION,
@@ -1255,28 +2275,30 @@ static void binder_transaction_buffer_re
if (buffer->target_node)
binder_dec_node(buffer->target_node, 1, 0);

- offp = (binder_size_t *)(buffer->data +
- ALIGN(buffer->data_size, sizeof(void *)));
+ off_start = (binder_size_t *)(buffer->data +
+ ALIGN(buffer->data_size, sizeof(void *)));
if (failed_at)
off_end = failed_at;
else
- off_end = (void *)offp + buffer->offsets_size;
- for (; offp < off_end; offp++) {
- struct flat_binder_object *fp;
+ off_end = (void *)off_start + buffer->offsets_size;
+ for (offp = off_start; offp < off_end; offp++) {
+ struct binder_object_header *hdr;
+ size_t object_size = binder_validate_object(buffer, *offp);

- if (*offp > buffer->data_size - sizeof(*fp) ||
- buffer->data_size < sizeof(*fp) ||
- !IS_ALIGNED(*offp, sizeof(u32))) {
- pr_err("transaction release %d bad offset %lld, size %zd\n",
+ if (object_size == 0) {
+ pr_err("transaction release %d bad object at offset %lld, size %zd\n",
debug_id, (u64)*offp, buffer->data_size);
continue;
}
- fp = (struct flat_binder_object *)(buffer->data + *offp);
- switch (fp->type) {
+ hdr = (struct binder_object_header *)(buffer->data + *offp);
+ switch (hdr->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
- struct binder_node *node = binder_get_node(proc, fp->binder);
+ struct flat_binder_object *fp;
+ struct binder_node *node;

+ fp = to_flat_binder_object(hdr);
+ node = binder_get_node(proc, fp->binder);
if (node == NULL) {
pr_err("transaction release %d bad node %016llx\n",
debug_id, (u64)fp->binder);
@@ -1285,90 +2307,564 @@ static void binder_transaction_buffer_re
binder_debug(BINDER_DEBUG_TRANSACTION,
" node %d u%016llx\n",
node->debug_id, (u64)node->ptr);
- binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
+ binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
+ 0);
+ binder_put_node(node);
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
- struct binder_ref *ref;
+ struct flat_binder_object *fp;
+ struct binder_ref_data rdata;
+ int ret;

- ref = binder_get_ref(proc, fp->handle,
- fp->type == BINDER_TYPE_HANDLE);
+ fp = to_flat_binder_object(hdr);
+ ret = binder_dec_ref_for_handle(proc, fp->handle,
+ hdr->type == BINDER_TYPE_HANDLE, &rdata);

- if (ref == NULL) {
- pr_err("transaction release %d bad handle %d\n",
- debug_id, fp->handle);
+ if (ret) {
+ pr_err("transaction release %d bad handle %d, ret = %d\n",
+ debug_id, fp->handle, ret);
break;
}
binder_debug(BINDER_DEBUG_TRANSACTION,
- " ref %d desc %d (node %d)\n",
- ref->debug_id, ref->desc, ref->node->debug_id);
- binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
+ " ref %d desc %d\n",
+ rdata.debug_id, rdata.desc);
} break;

- case BINDER_TYPE_FD:
+ case BINDER_TYPE_FD: {
+ struct binder_fd_object *fp = to_binder_fd_object(hdr);
+
binder_debug(BINDER_DEBUG_TRANSACTION,
- " fd %d\n", fp->handle);
+ " fd %d\n", fp->fd);
if (failed_at)
- task_close_fd(proc, fp->handle);
+ task_close_fd(proc, fp->fd);
+ } break;
+ case BINDER_TYPE_PTR:
+ /*
+ * Nothing to do here, this will get cleaned up when the
+ * transaction buffer gets freed
+ */
break;
+ case BINDER_TYPE_FDA: {
+ struct binder_fd_array_object *fda;
+ struct binder_buffer_object *parent;
+ uintptr_t parent_buffer;
+ u32 *fd_array;
+ size_t fd_index;
+ binder_size_t fd_buf_size;
+
+ fda = to_binder_fd_array_object(hdr);
+ parent = binder_validate_ptr(buffer, fda->parent,
+ off_start,
+ offp - off_start);
+ if (!parent) {
+ pr_err("transaction release %d bad parent offset",
+ debug_id);
+ continue;
+ }
+ /*
+ * Since the parent was already fixed up, convert it
+ * back to kernel address space to access it
+ */
+ parent_buffer = parent->buffer -
+ binder_alloc_get_user_buffer_offset(
+ &proc->alloc);

+ fd_buf_size = sizeof(u32) * fda->num_fds;
+ if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
+ pr_err("transaction release %d invalid number of fds (%lld)\n",
+ debug_id, (u64)fda->num_fds);
+ continue;
+ }
+ if (fd_buf_size > parent->length ||
+ fda->parent_offset > parent->length - fd_buf_size) {
+ /* No space for all file descriptors here. */
+ pr_err("transaction release %d not enough space for %lld fds in buffer\n",
+ debug_id, (u64)fda->num_fds);
+ continue;
+ }
+ fd_array = (u32 *)(parent_buffer + fda->parent_offset);
+ for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
+ task_close_fd(proc, fd_array[fd_index]);
+ } break;
default:
pr_err("transaction release %d bad object type %x\n",
- debug_id, fp->type);
+ debug_id, hdr->type);
break;
}
}
}

+static int binder_translate_binder(struct flat_binder_object *fp,
+ struct binder_transaction *t,
+ struct binder_thread *thread)
+{
+ struct binder_node *node;
+ struct binder_proc *proc = thread->proc;
+ struct binder_proc *target_proc = t->to_proc;
+ struct binder_ref_data rdata;
+ int ret = 0;
+
+ node = binder_get_node(proc, fp->binder);
+ if (!node) {
+ node = binder_new_node(proc, fp);
+ if (!node)
+ return -ENOMEM;
+ }
+ if (fp->cookie != node->cookie) {
+ binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
+ proc->pid, thread->pid, (u64)fp->binder,
+ node->debug_id, (u64)fp->cookie,
+ (u64)node->cookie);
+ ret = -EINVAL;
+ goto done;
+ }
+ if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+ ret = -EPERM;
+ goto done;
+ }
+
+ ret = binder_inc_ref_for_node(target_proc, node,
+ fp->hdr.type == BINDER_TYPE_BINDER,
+ &thread->todo, &rdata);
+ if (ret)
+ goto done;
+
+ if (fp->hdr.type == BINDER_TYPE_BINDER)
+ fp->hdr.type = BINDER_TYPE_HANDLE;
+ else
+ fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
+ fp->binder = 0;
+ fp->handle = rdata.desc;
+ fp->cookie = 0;
+
+ trace_binder_transaction_node_to_ref(t, node, &rdata);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " node %d u%016llx -> ref %d desc %d\n",
+ node->debug_id, (u64)node->ptr,
+ rdata.debug_id, rdata.desc);
+done:
+ binder_put_node(node);
+ return ret;
+}
+
+static int binder_translate_handle(struct flat_binder_object *fp,
+ struct binder_transaction *t,
+ struct binder_thread *thread)
+{
+ struct binder_proc *proc = thread->proc;
+ struct binder_proc *target_proc = t->to_proc;
+ struct binder_node *node;
+ struct binder_ref_data src_rdata;
+ int ret = 0;
+
+ node = binder_get_node_from_ref(proc, fp->handle,
+ fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
+ if (!node) {
+ binder_user_error("%d:%d got transaction with invalid handle, %d\n",
+ proc->pid, thread->pid, fp->handle);
+ return -EINVAL;
+ }
+ if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+ ret = -EPERM;
+ goto done;
+ }
+
+ binder_node_lock(node);
+ if (node->proc == target_proc) {
+ if (fp->hdr.type == BINDER_TYPE_HANDLE)
+ fp->hdr.type = BINDER_TYPE_BINDER;
+ else
+ fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
+ fp->binder = node->ptr;
+ fp->cookie = node->cookie;
+ if (node->proc)
+ binder_inner_proc_lock(node->proc);
+ binder_inc_node_nilocked(node,
+ fp->hdr.type == BINDER_TYPE_BINDER,
+ 0, NULL);
+ if (node->proc)
+ binder_inner_proc_unlock(node->proc);
+ trace_binder_transaction_ref_to_node(t, node, &src_rdata);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " ref %d desc %d -> node %d u%016llx\n",
+ src_rdata.debug_id, src_rdata.desc, node->debug_id,
+ (u64)node->ptr);
+ binder_node_unlock(node);
+ } else {
+ struct binder_ref_data dest_rdata;
+
+ binder_node_unlock(node);
+ ret = binder_inc_ref_for_node(target_proc, node,
+ fp->hdr.type == BINDER_TYPE_HANDLE,
+ NULL, &dest_rdata);
+ if (ret)
+ goto done;
+
+ fp->binder = 0;
+ fp->handle = dest_rdata.desc;
+ fp->cookie = 0;
+ trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
+ &dest_rdata);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " ref %d desc %d -> ref %d desc %d (node %d)\n",
+ src_rdata.debug_id, src_rdata.desc,
+ dest_rdata.debug_id, dest_rdata.desc,
+ node->debug_id);
+ }
+done:
+ binder_put_node(node);
+ return ret;
+}
+
+static int binder_translate_fd(int fd,
+ struct binder_transaction *t,
+ struct binder_thread *thread,
+ struct binder_transaction *in_reply_to)
+{
+ struct binder_proc *proc = thread->proc;
+ struct binder_proc *target_proc = t->to_proc;
+ int target_fd;
+ struct file *file;
+ int ret;
+ bool target_allows_fd;
+
+ if (in_reply_to)
+ target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
+ else
+ target_allows_fd = t->buffer->target_node->accept_fds;
+ if (!target_allows_fd) {
+ binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
+ proc->pid, thread->pid,
+ in_reply_to ? "reply" : "transaction",
+ fd);
+ ret = -EPERM;
+ goto err_fd_not_accepted;
+ }
+
+ file = fget(fd);
+ if (!file) {
+ binder_user_error("%d:%d got transaction with invalid fd, %d\n",
+ proc->pid, thread->pid, fd);
+ ret = -EBADF;
+ goto err_fget;
+ }
+ ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
+ if (ret < 0) {
+ ret = -EPERM;
+ goto err_security;
+ }
+
+ target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
+ if (target_fd < 0) {
+ ret = -ENOMEM;
+ goto err_get_unused_fd;
+ }
+ task_fd_install(target_proc, target_fd, file);
+ trace_binder_transaction_fd(t, fd, target_fd);
+ binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
+ fd, target_fd);
+
+ return target_fd;
+
+err_get_unused_fd:
+err_security:
+ fput(file);
+err_fget:
+err_fd_not_accepted:
+ return ret;
+}
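The cascading error labels mirror the acquisition order in reverse, so each label undoes exactly the steps that succeeded before the failure. A self-contained sketch of the idiom outside the kernel (all names illustrative):

    #include <stdlib.h>

    static int setup(char **pa, char **pb)
    {
        int ret;

        *pa = malloc(16);
        if (!*pa) {
            ret = -1;
            goto err_a;     /* nothing acquired yet */
        }
        *pb = malloc(16);
        if (!*pb) {
            ret = -2;
            goto err_b;     /* undo only the first step */
        }
        return 0;           /* success: caller owns *pa and *pb */

    err_b:
        free(*pa);
    err_a:
        return ret;
    }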
+
+static int binder_translate_fd_array(struct binder_fd_array_object *fda,
+ struct binder_buffer_object *parent,
+ struct binder_transaction *t,
+ struct binder_thread *thread,
+ struct binder_transaction *in_reply_to)
+{
+ binder_size_t fdi, fd_buf_size, num_installed_fds;
+ int target_fd;
+ uintptr_t parent_buffer;
+ u32 *fd_array;
+ struct binder_proc *proc = thread->proc;
+ struct binder_proc *target_proc = t->to_proc;
+
+ fd_buf_size = sizeof(u32) * fda->num_fds;
+ if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
+ binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
+ proc->pid, thread->pid, (u64)fda->num_fds);
+ return -EINVAL;
+ }
+ if (fd_buf_size > parent->length ||
+ fda->parent_offset > parent->length - fd_buf_size) {
+ /* No space for all file descriptors here. */
+ binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
+ proc->pid, thread->pid, (u64)fda->num_fds);
+ return -EINVAL;
+ }
+ /*
+ * Since the parent was already fixed up, convert it
+ * back to the kernel address space to access it
+ */
+ parent_buffer = parent->buffer -
+ binder_alloc_get_user_buffer_offset(&target_proc->alloc);
+ fd_array = (u32 *)(parent_buffer + fda->parent_offset);
+ if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
+ binder_user_error("%d:%d parent offset not aligned correctly.\n",
+ proc->pid, thread->pid);
+ return -EINVAL;
+ }
+ for (fdi = 0; fdi < fda->num_fds; fdi++) {
+ target_fd = binder_translate_fd(fd_array[fdi], t, thread,
+ in_reply_to);
+ if (target_fd < 0)
+ goto err_translate_fd_failed;
+ fd_array[fdi] = target_fd;
+ }
+ return 0;
+
+err_translate_fd_failed:
+ /*
+ * Failed to allocate fd or security error, free fds
+ * installed so far.
+ */
+ num_installed_fds = fdi;
+ for (fdi = 0; fdi < num_installed_fds; fdi++)
+ task_close_fd(target_proc, fd_array[fdi]);
+ return target_fd;
+}
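On a mid-array failure, only the descriptors already installed are closed: fdi marks the failure point, and the rollback loop runs strictly below it. The shape, reduced to its essentials (install and uninstall are placeholders, not driver functions):

    for (i = 0; i < n; i++)
        if (install(i) < 0)
            goto rollback;
    return 0;

    rollback:
    while (i--)             /* undo entries 0 .. i-1 only */
        uninstall(i);
    return -1;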
+
+static int binder_fixup_parent(struct binder_transaction *t,
+ struct binder_thread *thread,
+ struct binder_buffer_object *bp,
+ binder_size_t *off_start,
+ binder_size_t num_valid,
+ struct binder_buffer_object *last_fixup_obj,
+ binder_size_t last_fixup_min_off)
+{
+ struct binder_buffer_object *parent;
+ u8 *parent_buffer;
+ struct binder_buffer *b = t->buffer;
+ struct binder_proc *proc = thread->proc;
+ struct binder_proc *target_proc = t->to_proc;
+
+ if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
+ return 0;
+
+ parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
+ if (!parent) {
+ binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
+ proc->pid, thread->pid);
+ return -EINVAL;
+ }
+
+ if (!binder_validate_fixup(b, off_start,
+ parent, bp->parent_offset,
+ last_fixup_obj,
+ last_fixup_min_off)) {
+ binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
+ proc->pid, thread->pid);
+ return -EINVAL;
+ }
+
+ if (parent->length < sizeof(binder_uintptr_t) ||
+ bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
+ /* No space for a pointer here! */
+ binder_user_error("%d:%d got transaction with invalid parent offset\n",
+ proc->pid, thread->pid);
+ return -EINVAL;
+ }
+ parent_buffer = (u8 *)(parent->buffer -
+ binder_alloc_get_user_buffer_offset(
+ &target_proc->alloc));
+ *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
+
+ return 0;
+}
+
+/**
+ * binder_proc_transaction() - sends a transaction to a process and wakes it up
+ * @t: transaction to send
+ * @proc: process to send the transaction to
+ * @thread: thread in @proc to send the transaction to (may be NULL)
+ *
+ * This function queues a transaction to the specified process. It will try
+ * to find a thread in the target process to handle the transaction and
+ * wake it up. If no thread is found, the work is queued to the proc
+ * waitqueue.
+ *
+ * If the @thread parameter is not NULL, the transaction is always queued
+ * to the waitlist of that specific thread.
+ *
+ * Return: true if the transaction was successfully queued
+ * false if the target process or thread is dead
+ */
+static bool binder_proc_transaction(struct binder_transaction *t,
+ struct binder_proc *proc,
+ struct binder_thread *thread)
+{
+ struct list_head *target_list = NULL;
+ struct binder_node *node = t->buffer->target_node;
+ struct binder_priority node_prio;
+ bool oneway = !!(t->flags & TF_ONE_WAY);
+ bool wakeup = true;
+
+ BUG_ON(!node);
+ binder_node_lock(node);
+ node_prio.prio = node->min_priority;
+ node_prio.sched_policy = node->sched_policy;
+
+ if (oneway) {
+ BUG_ON(thread);
+ if (node->has_async_transaction) {
+ target_list = &node->async_todo;
+ wakeup = false;
+ } else {
+ node->has_async_transaction = 1;
+ }
+ }
+
+ binder_inner_proc_lock(proc);
+
+ if (proc->is_dead || (thread && thread->is_dead)) {
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(node);
+ return false;
+ }
+
+ if (!thread && !target_list)
+ thread = binder_select_thread_ilocked(proc);
+
+ if (thread) {
+ target_list = &thread->todo;
+ binder_transaction_priority(thread->task, t, node_prio,
+ node->inherit_rt);
+ } else if (!target_list) {
+ target_list = &proc->todo;
+ } else {
+ BUG_ON(target_list != &node->async_todo);
+ }
+
+ binder_enqueue_work_ilocked(&t->work, target_list);
+
+ if (wakeup)
+ binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
+
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(node);
+
+ return true;
+}
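Callers treat a false return as the target dying mid-send and unwind the transaction. A hedged sketch of the expected call site (the shape the sender in binder_transaction() follows; the label name is illustrative):

    if (!(t->flags & TF_ONE_WAY))
        ret = binder_proc_transaction(t, target_proc, target_thread);
    else
        ret = binder_proc_transaction(t, target_proc, NULL);  /* oneway: no thread */
    if (!ret)
        goto err_dead_proc_or_thread;   /* drop refs, report the failure */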
+
+/**
+ * binder_get_node_refs_for_txn() - Get required refs on node for txn
+ * @node: struct binder_node for which to get refs
+ * @procp: returns @node->proc if valid
+ * @error: if no @procp then returns BR_DEAD_REPLY
+ *
+ * User-space normally keeps the node alive when creating a transaction
+ * since it has a reference to the target. The local strong ref keeps it
+ * alive if the sending process dies before the target process processes
+ * the transaction. If the source process is malicious or has a reference
+ * counting bug, relying on the local strong ref can fail.
+ *
+ * Since user-space can cause the local strong ref to go away, we also take
+ * a tmpref on the node to ensure it survives while we are constructing
+ * the transaction. We also need a tmpref on the proc while we are
+ * constructing the transaction, so we take that here as well.
+ *
+ * Return: The target_node with refs taken or NULL if @node->proc is NULL.
+ * Also sets @procp if valid. If @node->proc is NULL, indicating that the
+ * target proc has died, @error is set to BR_DEAD_REPLY
|
|
+ */
|
|
+static struct binder_node *binder_get_node_refs_for_txn(
|
|
+ struct binder_node *node,
|
|
+ struct binder_proc **procp,
|
|
+ uint32_t *error)
|
|
+{
|
|
+ struct binder_node *target_node = NULL;
|
|
+
|
|
+ binder_node_inner_lock(node);
|
|
+ if (node->proc) {
|
|
+ target_node = node;
|
|
+ binder_inc_node_nilocked(node, 1, 0, NULL);
|
|
+ binder_inc_node_tmpref_ilocked(node);
|
|
+ node->proc->tmp_ref++;
|
|
+ *procp = node->proc;
|
|
+ } else
|
|
+ *error = BR_DEAD_REPLY;
|
|
+ binder_node_inner_unlock(node);
|
|
+
|
|
+ return target_node;
|
|
+}
|
|
+
|
|
static void binder_transaction(struct binder_proc *proc,
|
|
struct binder_thread *thread,
|
|
- struct binder_transaction_data *tr, int reply)
|
|
+ struct binder_transaction_data *tr, int reply,
|
|
+ binder_size_t extra_buffers_size)
|
|
{
|
|
+ int ret;
|
|
struct binder_transaction *t;
|
|
struct binder_work *tcomplete;
|
|
- binder_size_t *offp, *off_end;
|
|
+ binder_size_t *offp, *off_end, *off_start;
|
|
binder_size_t off_min;
|
|
- struct binder_proc *target_proc;
|
|
+ u8 *sg_bufp, *sg_buf_end;
|
|
+ struct binder_proc *target_proc = NULL;
|
|
struct binder_thread *target_thread = NULL;
|
|
struct binder_node *target_node = NULL;
|
|
- struct list_head *target_list;
|
|
- wait_queue_head_t *target_wait;
|
|
struct binder_transaction *in_reply_to = NULL;
|
|
struct binder_transaction_log_entry *e;
|
|
- uint32_t return_error;
|
|
+ uint32_t return_error = 0;
|
|
+ uint32_t return_error_param = 0;
|
|
+ uint32_t return_error_line = 0;
|
|
+ struct binder_buffer_object *last_fixup_obj = NULL;
|
|
+ binder_size_t last_fixup_min_off = 0;
|
|
+ struct binder_context *context = proc->context;
|
|
+ int t_debug_id = atomic_inc_return(&binder_last_id);
|
|
|
|
e = binder_transaction_log_add(&binder_transaction_log);
|
|
+ e->debug_id = t_debug_id;
|
|
e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
|
|
e->from_proc = proc->pid;
|
|
e->from_thread = thread->pid;
|
|
e->target_handle = tr->target.handle;
|
|
e->data_size = tr->data_size;
|
|
e->offsets_size = tr->offsets_size;
|
|
+ e->context_name = proc->context->name;
|
|
|
|
if (reply) {
|
|
+ binder_inner_proc_lock(proc);
|
|
in_reply_to = thread->transaction_stack;
|
|
if (in_reply_to == NULL) {
|
|
+ binder_inner_proc_unlock(proc);
|
|
binder_user_error("%d:%d got reply transaction with no transaction stack\n",
|
|
proc->pid, thread->pid);
|
|
return_error = BR_FAILED_REPLY;
|
|
+ return_error_param = -EPROTO;
|
|
+ return_error_line = __LINE__;
|
|
goto err_empty_call_stack;
|
|
}
|
|
- binder_set_nice(in_reply_to->saved_priority);
|
|
if (in_reply_to->to_thread != thread) {
|
|
+ spin_lock(&in_reply_to->lock);
|
|
binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
|
|
proc->pid, thread->pid, in_reply_to->debug_id,
|
|
in_reply_to->to_proc ?
|
|
in_reply_to->to_proc->pid : 0,
|
|
in_reply_to->to_thread ?
|
|
in_reply_to->to_thread->pid : 0);
|
|
+ spin_unlock(&in_reply_to->lock);
|
|
+ binder_inner_proc_unlock(proc);
|
|
return_error = BR_FAILED_REPLY;
|
|
+ return_error_param = -EPROTO;
|
|
+ return_error_line = __LINE__;
|
|
in_reply_to = NULL;
|
|
goto err_bad_call_stack;
|
|
}
|
|
thread->transaction_stack = in_reply_to->to_parent;
|
|
- target_thread = in_reply_to->from;
|
|
+ binder_inner_proc_unlock(proc);
|
|
+ target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
|
|
if (target_thread == NULL) {
|
|
return_error = BR_DEAD_REPLY;
|
|
+ return_error_line = __LINE__;
|
|
goto err_dead_binder;
|
|
}
|
|
if (target_thread->transaction_stack != in_reply_to) {
|
|
@@ -1377,106 +2873,148 @@ static void binder_transaction(struct bi
target_thread->transaction_stack ?
target_thread->transaction_stack->debug_id : 0,
in_reply_to->debug_id);
+ binder_inner_proc_unlock(target_thread->proc);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
in_reply_to = NULL;
target_thread = NULL;
goto err_dead_binder;
}
target_proc = target_thread->proc;
+ target_proc->tmp_ref++;
+ binder_inner_proc_unlock(target_thread->proc);
} else {
if (tr->target.handle) {
struct binder_ref *ref;

- ref = binder_get_ref(proc, tr->target.handle, true);
- if (ref == NULL) {
+ /*
+ * There must already be a strong ref
+ * on this node. If so, do a strong
+ * increment on the node to ensure it
+ * stays alive until the transaction is
+ * done.
+ */
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, tr->target.handle,
+ true);
+ if (ref) {
+ target_node = binder_get_node_refs_for_txn(
+ ref->node, &target_proc,
+ &return_error);
+ } else {
binder_user_error("%d:%d got transaction to invalid handle\n",
- proc->pid, thread->pid);
+ proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
- goto err_invalid_target_handle;
}
- target_node = ref->node;
+ binder_proc_unlock(proc);
} else {
- target_node = binder_context_mgr_node;
- if (target_node == NULL) {
+ mutex_lock(&context->context_mgr_node_lock);
+ target_node = context->binder_context_mgr_node;
+ if (target_node)
+ target_node = binder_get_node_refs_for_txn(
+ target_node, &target_proc,
+ &return_error);
+ else
return_error = BR_DEAD_REPLY;
- goto err_no_context_mgr_node;
- }
+ mutex_unlock(&context->context_mgr_node_lock);
}
- e->to_node = target_node->debug_id;
- target_proc = target_node->proc;
- if (target_proc == NULL) {
- return_error = BR_DEAD_REPLY;
+ if (!target_node) {
+ /*
+ * return_error is set above
+ */
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_dead_binder;
}
+ e->to_node = target_node->debug_id;
if (security_binder_transaction(proc->tsk,
target_proc->tsk) < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPERM;
+ return_error_line = __LINE__;
goto err_invalid_target_handle;
}
+ binder_inner_proc_lock(proc);
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
struct binder_transaction *tmp;

tmp = thread->transaction_stack;
if (tmp->to_thread != thread) {
+ spin_lock(&tmp->lock);
binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
proc->pid, thread->pid, tmp->debug_id,
tmp->to_proc ? tmp->to_proc->pid : 0,
tmp->to_thread ?
tmp->to_thread->pid : 0);
+ spin_unlock(&tmp->lock);
+ binder_inner_proc_unlock(proc);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
goto err_bad_call_stack;
}
while (tmp) {
- if (tmp->from && tmp->from->proc == target_proc)
- target_thread = tmp->from;
+ struct binder_thread *from;
+
+ spin_lock(&tmp->lock);
+ from = tmp->from;
+ if (from && from->proc == target_proc) {
+ atomic_inc(&from->tmp_ref);
+ target_thread = from;
+ spin_unlock(&tmp->lock);
+ break;
+ }
+ spin_unlock(&tmp->lock);
tmp = tmp->from_parent;
}
}
+ binder_inner_proc_unlock(proc);
}
- if (target_thread) {
+ if (target_thread)
e->to_thread = target_thread->pid;
- target_list = &target_thread->todo;
- target_wait = &target_thread->wait;
- } else {
- target_list = &target_proc->todo;
- target_wait = &target_proc->wait;
- }
e->to_proc = target_proc->pid;

/* TODO: reuse incoming transaction for reply */
t = kzalloc(sizeof(*t), GFP_KERNEL);
if (t == NULL) {
return_error = BR_FAILED_REPLY;
+ return_error_param = -ENOMEM;
+ return_error_line = __LINE__;
goto err_alloc_t_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION);
+ spin_lock_init(&t->lock);

tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
return_error = BR_FAILED_REPLY;
+ return_error_param = -ENOMEM;
+ return_error_line = __LINE__;
goto err_alloc_tcomplete_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

- t->debug_id = ++binder_last_id;
- e->debug_id = t->debug_id;
+ t->debug_id = t_debug_id;

if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
+ "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_thread->pid,
(u64)tr->data.ptr.buffer,
(u64)tr->data.ptr.offsets,
- (u64)tr->data_size, (u64)tr->offsets_size);
+ (u64)tr->data_size, (u64)tr->offsets_size,
+ (u64)extra_buffers_size);
else
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
+ "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_node->debug_id,
(u64)tr->data.ptr.buffer,
(u64)tr->data.ptr.offsets,
- (u64)tr->data_size, (u64)tr->offsets_size);
+ (u64)tr->data_size, (u64)tr->offsets_size,
+ (u64)extra_buffers_size);

if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;
@@ -1487,32 +3025,47 @@ static void binder_transaction(struct bi
t->to_thread = target_thread;
t->code = tr->code;
t->flags = tr->flags;
- t->priority = task_nice(current);
+ if (!(t->flags & TF_ONE_WAY) &&
+ binder_supported_policy(current->policy)) {
+ /* Inherit supported policies for synchronous transactions */
+ t->priority.sched_policy = current->policy;
+ t->priority.prio = current->normal_prio;
+ } else {
+ /* Otherwise, fall back to the default priority */
+ t->priority = target_proc->default_priority;
+ }

trace_binder_transaction(reply, t, target_node);

- t->buffer = binder_alloc_buf(target_proc, tr->data_size,
- tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
- if (t->buffer == NULL) {
- return_error = BR_FAILED_REPLY;
+ t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
+ tr->offsets_size, extra_buffers_size,
+ !reply && (t->flags & TF_ONE_WAY));
+ if (IS_ERR(t->buffer)) {
+ /*
+ * -ESRCH indicates VMA cleared. The target is dying.
+ */
+ return_error_param = PTR_ERR(t->buffer);
+ return_error = return_error_param == -ESRCH ?
+ BR_DEAD_REPLY : BR_FAILED_REPLY;
+ return_error_line = __LINE__;
+ t->buffer = NULL;
goto err_binder_alloc_buf_failed;
}
- t->buffer->allow_user_free = 0;
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
trace_binder_transaction_alloc_buf(t->buffer);
- if (target_node)
- binder_inc_node(target_node, 1, 0, NULL);
-
- offp = (binder_size_t *)(t->buffer->data +
- ALIGN(tr->data_size, sizeof(void *)));
+ off_start = (binder_size_t *)(t->buffer->data +
+ ALIGN(tr->data_size, sizeof(void *)));
+ offp = off_start;

if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
tr->data.ptr.buffer, tr->data_size)) {
binder_user_error("%d:%d got transaction with invalid data ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EFAULT;
+ return_error_line = __LINE__;
goto err_copy_data_failed;
}
if (copy_from_user(offp, (const void __user *)(uintptr_t)
@@ -1520,231 +3073,244 @@ static void binder_transaction(struct bi
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EFAULT;
+ return_error_line = __LINE__;
goto err_copy_data_failed;
}
if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
proc->pid, thread->pid, (u64)tr->offsets_size);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
- off_end = (void *)offp + tr->offsets_size;
+ if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
+ binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
+ proc->pid, thread->pid,
+ (u64)extra_buffers_size);
+ return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
+ goto err_bad_offset;
+ }
+ off_end = (void *)off_start + tr->offsets_size;
+ sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
+ sg_buf_end = sg_bufp + extra_buffers_size;
off_min = 0;
for (; offp < off_end; offp++) {
- struct flat_binder_object *fp;
+ struct binder_object_header *hdr;
+ size_t object_size = binder_validate_object(t->buffer, *offp);

- if (*offp > t->buffer->data_size - sizeof(*fp) ||
- *offp < off_min ||
- t->buffer->data_size < sizeof(*fp) ||
- !IS_ALIGNED(*offp, sizeof(u32))) {
- binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
+ if (object_size == 0 || *offp < off_min) {
+ binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
proc->pid, thread->pid, (u64)*offp,
(u64)off_min,
- (u64)(t->buffer->data_size -
- sizeof(*fp)));
+ (u64)t->buffer->data_size);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
- fp = (struct flat_binder_object *)(t->buffer->data + *offp);
- off_min = *offp + sizeof(struct flat_binder_object);
- switch (fp->type) {
+
+ hdr = (struct binder_object_header *)(t->buffer->data + *offp);
+ off_min = *offp + object_size;
+ switch (hdr->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
- struct binder_ref *ref;
- struct binder_node *node = binder_get_node(proc, fp->binder);
+ struct flat_binder_object *fp;

- if (node == NULL) {
- node = binder_new_node(proc, fp->binder, fp->cookie);
- if (node == NULL) {
- return_error = BR_FAILED_REPLY;
- goto err_binder_new_node_failed;
- }
- node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
- node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
- }
- if (fp->cookie != node->cookie) {
- binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
- proc->pid, thread->pid,
- (u64)fp->binder, node->debug_id,
- (u64)fp->cookie, (u64)node->cookie);
- return_error = BR_FAILED_REPLY;
- goto err_binder_get_ref_for_node_failed;
- }
- if (security_binder_transfer_binder(proc->tsk,
- target_proc->tsk)) {
- return_error = BR_FAILED_REPLY;
- goto err_binder_get_ref_for_node_failed;
- }
- ref = binder_get_ref_for_node(target_proc, node);
- if (ref == NULL) {
+ fp = to_flat_binder_object(hdr);
+ ret = binder_translate_binder(fp, t, thread);
+ if (ret < 0) {
return_error = BR_FAILED_REPLY;
- goto err_binder_get_ref_for_node_failed;
+ return_error_param = ret;
+ return_error_line = __LINE__;
+ goto err_translate_failed;
}
- if (fp->type == BINDER_TYPE_BINDER)
- fp->type = BINDER_TYPE_HANDLE;
- else
- fp->type = BINDER_TYPE_WEAK_HANDLE;
- fp->binder = 0;
- fp->handle = ref->desc;
- fp->cookie = 0;
- binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
- &thread->todo);
-
- trace_binder_transaction_node_to_ref(t, node, ref);
- binder_debug(BINDER_DEBUG_TRANSACTION,
- " node %d u%016llx -> ref %d desc %d\n",
- node->debug_id, (u64)node->ptr,
- ref->debug_id, ref->desc);
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
- struct binder_ref *ref;
-
- ref = binder_get_ref(proc, fp->handle,
- fp->type == BINDER_TYPE_HANDLE);
+ struct flat_binder_object *fp;

- if (ref == NULL) {
- binder_user_error("%d:%d got transaction with invalid handle, %d\n",
- proc->pid,
- thread->pid, fp->handle);
- return_error = BR_FAILED_REPLY;
- goto err_binder_get_ref_failed;
- }
- if (security_binder_transfer_binder(proc->tsk,
- target_proc->tsk)) {
+ fp = to_flat_binder_object(hdr);
+ ret = binder_translate_handle(fp, t, thread);
+ if (ret < 0) {
return_error = BR_FAILED_REPLY;
- goto err_binder_get_ref_failed;
- }
- if (ref->node->proc == target_proc) {
- if (fp->type == BINDER_TYPE_HANDLE)
- fp->type = BINDER_TYPE_BINDER;
- else
- fp->type = BINDER_TYPE_WEAK_BINDER;
- fp->binder = ref->node->ptr;
- fp->cookie = ref->node->cookie;
- binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
- trace_binder_transaction_ref_to_node(t, ref);
- binder_debug(BINDER_DEBUG_TRANSACTION,
- " ref %d desc %d -> node %d u%016llx\n",
- ref->debug_id, ref->desc, ref->node->debug_id,
- (u64)ref->node->ptr);
- } else {
- struct binder_ref *new_ref;
-
- new_ref = binder_get_ref_for_node(target_proc, ref->node);
- if (new_ref == NULL) {
- return_error = BR_FAILED_REPLY;
- goto err_binder_get_ref_for_node_failed;
- }
- fp->binder = 0;
- fp->handle = new_ref->desc;
- fp->cookie = 0;
- binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
- trace_binder_transaction_ref_to_ref(t, ref,
- new_ref);
- binder_debug(BINDER_DEBUG_TRANSACTION,
- " ref %d desc %d -> ref %d desc %d (node %d)\n",
- ref->debug_id, ref->desc, new_ref->debug_id,
- new_ref->desc, ref->node->debug_id);
+ return_error_param = ret;
+ return_error_line = __LINE__;
+ goto err_translate_failed;
}
} break;

case BINDER_TYPE_FD: {
- int target_fd;
- struct file *file;
+ struct binder_fd_object *fp = to_binder_fd_object(hdr);
+ int target_fd = binder_translate_fd(fp->fd, t, thread,
+ in_reply_to);

- if (reply) {
- if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
- binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
- proc->pid, thread->pid, fp->handle);
- return_error = BR_FAILED_REPLY;
- goto err_fd_not_allowed;
- }
- } else if (!target_node->accept_fds) {
- binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
- proc->pid, thread->pid, fp->handle);
+ if (target_fd < 0) {
return_error = BR_FAILED_REPLY;
- goto err_fd_not_allowed;
+ return_error_param = target_fd;
+ return_error_line = __LINE__;
+ goto err_translate_failed;
}
-
- file = fget(fp->handle);
- if (file == NULL) {
- binder_user_error("%d:%d got transaction with invalid fd, %d\n",
- proc->pid, thread->pid, fp->handle);
+ fp->pad_binder = 0;
+ fp->fd = target_fd;
+ } break;
+ case BINDER_TYPE_FDA: {
+ struct binder_fd_array_object *fda =
+ to_binder_fd_array_object(hdr);
+ struct binder_buffer_object *parent =
+ binder_validate_ptr(t->buffer, fda->parent,
+ off_start,
+ offp - off_start);
+ if (!parent) {
+ binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
+ proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
- goto err_fget_failed;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
+ goto err_bad_parent;
}
- if (security_binder_transfer_file(proc->tsk,
- target_proc->tsk,
- file) < 0) {
- fput(file);
+ if (!binder_validate_fixup(t->buffer, off_start,
+ parent, fda->parent_offset,
+ last_fixup_obj,
+ last_fixup_min_off)) {
+ binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
+ proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
- goto err_get_unused_fd_failed;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
+ goto err_bad_parent;
}
- target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
- if (target_fd < 0) {
- fput(file);
+ ret = binder_translate_fd_array(fda, parent, t, thread,
+ in_reply_to);
+ if (ret < 0) {
return_error = BR_FAILED_REPLY;
- goto err_get_unused_fd_failed;
+ return_error_param = ret;
+ return_error_line = __LINE__;
+ goto err_translate_failed;
}
- task_fd_install(target_proc, target_fd, file);
- trace_binder_transaction_fd(t, fp->handle, target_fd);
- binder_debug(BINDER_DEBUG_TRANSACTION,
- " fd %d -> %d\n", fp->handle, target_fd);
- /* TODO: fput? */
- fp->binder = 0;
- fp->handle = target_fd;
+ last_fixup_obj = parent;
+ last_fixup_min_off =
+ fda->parent_offset + sizeof(u32) * fda->num_fds;
} break;
+ case BINDER_TYPE_PTR: {
+ struct binder_buffer_object *bp =
+ to_binder_buffer_object(hdr);
+ size_t buf_left = sg_buf_end - sg_bufp;
+
+ if (bp->length > buf_left) {
+ binder_user_error("%d:%d got transaction with too large buffer\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
+ goto err_bad_offset;
+ }
+ if (copy_from_user(sg_bufp,
+ (const void __user *)(uintptr_t)
+ bp->buffer, bp->length)) {
+ binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
+ proc->pid, thread->pid);
+ return_error_param = -EFAULT;
+ return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
+ goto err_copy_data_failed;
+ }
+ /* Fixup buffer pointer to target proc address space */
+ bp->buffer = (uintptr_t)sg_bufp +
+ binder_alloc_get_user_buffer_offset(
+ &target_proc->alloc);
+ sg_bufp += ALIGN(bp->length, sizeof(u64));

+ ret = binder_fixup_parent(t, thread, bp, off_start,
+ offp - off_start,
+ last_fixup_obj,
+ last_fixup_min_off);
+ if (ret < 0) {
+ return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
+ goto err_translate_failed;
+ }
+ last_fixup_obj = bp;
+ last_fixup_min_off = 0;
+ } break;
default:
binder_user_error("%d:%d got transaction with invalid object type, %x\n",
- proc->pid, thread->pid, fp->type);
+ proc->pid, thread->pid, hdr->type);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_object_type;
}
}
+ tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+ binder_enqueue_work(proc, tcomplete, &thread->todo);
+ t->work.type = BINDER_WORK_TRANSACTION;
+
if (reply) {
+ binder_inner_proc_lock(target_proc);
+ if (target_thread->is_dead) {
+ binder_inner_proc_unlock(target_proc);
+ goto err_dead_proc_or_thread;
+ }
BUG_ON(t->buffer->async_transaction != 0);
- binder_pop_transaction(target_thread, in_reply_to);
+ binder_pop_transaction_ilocked(target_thread, in_reply_to);
+ binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
+ binder_inner_proc_unlock(target_proc);
+ wake_up_interruptible_sync(&target_thread->wait);
+ binder_restore_priority(current, in_reply_to->saved_priority);
+ binder_free_transaction(in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
+ binder_inner_proc_lock(proc);
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
+ binder_inner_proc_unlock(proc);
+ if (!binder_proc_transaction(t, target_proc, target_thread)) {
+ binder_inner_proc_lock(proc);
+ binder_pop_transaction_ilocked(thread, t);
+ binder_inner_proc_unlock(proc);
+ goto err_dead_proc_or_thread;
+ }
} else {
BUG_ON(target_node == NULL);
BUG_ON(t->buffer->async_transaction != 1);
- if (target_node->has_async_transaction) {
- target_list = &target_node->async_todo;
- target_wait = NULL;
- } else
- target_node->has_async_transaction = 1;
- }
- t->work.type = BINDER_WORK_TRANSACTION;
- list_add_tail(&t->work.entry, target_list);
- tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
- list_add_tail(&tcomplete->entry, &thread->todo);
- if (target_wait) {
- if (reply || !(t->flags & TF_ONE_WAY))
- wake_up_interruptible_sync(target_wait);
- else
- wake_up_interruptible(target_wait);
+ if (!binder_proc_transaction(t, target_proc, NULL))
+ goto err_dead_proc_or_thread;
}
+ if (target_thread)
+ binder_thread_dec_tmpref(target_thread);
+ binder_proc_dec_tmpref(target_proc);
+ if (target_node)
+ binder_dec_node_tmpref(target_node);
+ /*
+ * write barrier to synchronize with initialization
+ * of log entry
+ */
+ smp_wmb();
+ WRITE_ONCE(e->debug_id_done, t_debug_id);
return;

-err_get_unused_fd_failed:
-err_fget_failed:
-err_fd_not_allowed:
-err_binder_get_ref_for_node_failed:
-err_binder_get_ref_failed:
-err_binder_new_node_failed:
+err_dead_proc_or_thread:
+ return_error = BR_DEAD_REPLY;
+ return_error_line = __LINE__;
+ binder_dequeue_work(proc, tcomplete);
+err_translate_failed:
err_bad_object_type:
err_bad_offset:
+err_bad_parent:
err_copy_data_failed:
trace_binder_transaction_failed_buffer_release(t->buffer);
binder_transaction_buffer_release(target_proc, t->buffer, offp);
+ if (target_node)
+ binder_dec_node_tmpref(target_node);
+ target_node = NULL;
t->buffer->transaction = NULL;
- binder_free_buf(target_proc, t->buffer);
+ binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
kfree(tcomplete);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
@@ -1756,25 +3322,52 @@ err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
-err_no_context_mgr_node:
+ if (target_thread)
+ binder_thread_dec_tmpref(target_thread);
+ if (target_proc)
+ binder_proc_dec_tmpref(target_proc);
+ if (target_node) {
+ binder_dec_node(target_node, 1, 0);
+ binder_dec_node_tmpref(target_node);
+ }
+
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "%d:%d transaction failed %d, size %lld-%lld\n",
- proc->pid, thread->pid, return_error,
- (u64)tr->data_size, (u64)tr->offsets_size);
+ "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
+ proc->pid, thread->pid, return_error, return_error_param,
+ (u64)tr->data_size, (u64)tr->offsets_size,
+ return_error_line);

{
struct binder_transaction_log_entry *fe;

+ e->return_error = return_error;
+ e->return_error_param = return_error_param;
+ e->return_error_line = return_error_line;
fe = binder_transaction_log_add(&binder_transaction_log_failed);
*fe = *e;
+ /*
+ * write barrier to synchronize with initialization
+ * of log entry
+ */
+ smp_wmb();
+ WRITE_ONCE(e->debug_id_done, t_debug_id);
+ WRITE_ONCE(fe->debug_id_done, t_debug_id);
}

- BUG_ON(thread->return_error != BR_OK);
+ BUG_ON(thread->return_error.cmd != BR_OK);
if (in_reply_to) {
- thread->return_error = BR_TRANSACTION_COMPLETE;
+ binder_restore_priority(current, in_reply_to->saved_priority);
+ thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
+ binder_enqueue_work(thread->proc,
+ &thread->return_error.work,
+ &thread->todo);
binder_send_failed_reply(in_reply_to, return_error);
- } else
- thread->return_error = return_error;
+ } else {
+ thread->return_error.cmd = return_error;
+ binder_enqueue_work(thread->proc,
+ &thread->return_error.work,
+ &thread->todo);
+ }
}

static int binder_thread_write(struct binder_proc *proc,
@@ -1783,19 +3376,22 @@ static int binder_thread_write(struct bi
binder_size_t *consumed)
{
uint32_t cmd;
+ struct binder_context *context = proc->context;
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;

- while (ptr < end && thread->return_error == BR_OK) {
+ while (ptr < end && thread->return_error.cmd == BR_OK) {
+ int ret;
+
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
trace_binder_command(cmd);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
- binder_stats.bc[_IOC_NR(cmd)]++;
- proc->stats.bc[_IOC_NR(cmd)]++;
- thread->stats.bc[_IOC_NR(cmd)]++;
+ atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
+ atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
+ atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
}
switch (cmd) {
case BC_INCREFS:
@@ -1803,53 +3399,61 @@ static int binder_thread_write(struct bi
case BC_RELEASE:
case BC_DECREFS: {
uint32_t target;
- struct binder_ref *ref;
const char *debug_string;
+ bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
+ bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
+ struct binder_ref_data rdata;

if (get_user(target, (uint32_t __user *)ptr))
return -EFAULT;
+
ptr += sizeof(uint32_t);
- if (target == 0 && binder_context_mgr_node &&
- (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
- ref = binder_get_ref_for_node(proc,
- binder_context_mgr_node);
- if (ref->desc != target) {
- binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
- proc->pid, thread->pid,
- ref->desc);
- }
- } else
- ref = binder_get_ref(proc, target,
- cmd == BC_ACQUIRE ||
- cmd == BC_RELEASE);
- if (ref == NULL) {
- binder_user_error("%d:%d refcount change on invalid ref %d\n",
- proc->pid, thread->pid, target);
- break;
+ ret = -1;
+ if (increment && !target) {
+ struct binder_node *ctx_mgr_node;
+ mutex_lock(&context->context_mgr_node_lock);
+ ctx_mgr_node = context->binder_context_mgr_node;
+ if (ctx_mgr_node)
+ ret = binder_inc_ref_for_node(
+ proc, ctx_mgr_node,
+ strong, NULL, &rdata);
+ mutex_unlock(&context->context_mgr_node_lock);
+ }
+ if (ret)
+ ret = binder_update_ref_for_handle(
+ proc, target, increment, strong,
+ &rdata);
+ if (!ret && rdata.desc != target) {
+ binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
+ proc->pid, thread->pid,
+ target, rdata.desc);
}
switch (cmd) {
case BC_INCREFS:
debug_string = "IncRefs";
- binder_inc_ref(ref, 0, NULL);
break;
case BC_ACQUIRE:
debug_string = "Acquire";
- binder_inc_ref(ref, 1, NULL);
break;
case BC_RELEASE:
debug_string = "Release";
- binder_dec_ref(ref, 1);
break;
case BC_DECREFS:
default:
debug_string = "DecRefs";
- binder_dec_ref(ref, 0);
+ break;
+ }
+ if (ret) {
+ binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
+ proc->pid, thread->pid, debug_string,
+ strong, target, ret);
break;
}
binder_debug(BINDER_DEBUG_USER_REFS,
- "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
- proc->pid, thread->pid, debug_string, ref->debug_id,
- ref->desc, ref->strong, ref->weak, ref->node->debug_id);
+ "%d:%d %s ref %d desc %d s %d w %d\n",
+ proc->pid, thread->pid, debug_string,
+ rdata.debug_id, rdata.desc, rdata.strong,
+ rdata.weak);
break;
}
case BC_INCREFS_DONE:
@@ -1857,6 +3461,7 @@ static int binder_thread_write(struct bi
binder_uintptr_t node_ptr;
binder_uintptr_t cookie;
struct binder_node *node;
+ bool free_node;

if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
@@ -1881,13 +3486,17 @@ static int binder_thread_write(struct bi
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
(u64)node_ptr, node->debug_id,
(u64)cookie, (u64)node->cookie);
+ binder_put_node(node);
break;
}
+ binder_node_inner_lock(node);
if (cmd == BC_ACQUIRE_DONE) {
if (node->pending_strong_ref == 0) {
binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
proc->pid, thread->pid,
node->debug_id);
+ binder_node_inner_unlock(node);
+ binder_put_node(node);
break;
}
node->pending_strong_ref = 0;
@@ -1896,16 +3505,23 @@ static int binder_thread_write(struct bi
binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
proc->pid, thread->pid,
node->debug_id);
+ binder_node_inner_unlock(node);
+ binder_put_node(node);
break;
}
node->pending_weak_ref = 0;
}
- binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
+ free_node = binder_dec_node_nilocked(node,
+ cmd == BC_ACQUIRE_DONE, 0);
+ WARN_ON(free_node);
binder_debug(BINDER_DEBUG_USER_REFS,
- "%d:%d %s node %d ls %d lw %d\n",
+ "%d:%d %s node %d ls %d lw %d tr %d\n",
proc->pid, thread->pid,
cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
- node->debug_id, node->local_strong_refs, node->local_weak_refs);
+ node->debug_id, node->local_strong_refs,
+ node->local_weak_refs, node->tmp_refs);
+ binder_node_inner_unlock(node);
+ binder_put_node(node);
break;
}
case BC_ATTEMPT_ACQUIRE:
@@ -1923,15 +3539,20 @@ static int binder_thread_write(struct bi
return -EFAULT;
ptr += sizeof(binder_uintptr_t);

- buffer = binder_buffer_lookup(proc, data_ptr);
- if (buffer == NULL) {
- binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
- proc->pid, thread->pid, (u64)data_ptr);
- break;
- }
- if (!buffer->allow_user_free) {
- binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
- proc->pid, thread->pid, (u64)data_ptr);
+ buffer = binder_alloc_prepare_to_free(&proc->alloc,
+ data_ptr);
+ if (IS_ERR_OR_NULL(buffer)) {
+ if (PTR_ERR(buffer) == -EPERM) {
+ binder_user_error(
+ "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
+ proc->pid, thread->pid,
+ (u64)data_ptr);
+ } else {
+ binder_user_error(
+ "%d:%d BC_FREE_BUFFER u%016llx no match\n",
+ proc->pid, thread->pid,
+ (u64)data_ptr);
+ }
break;
}
binder_debug(BINDER_DEBUG_FREE_BUFFER,
@@ -1945,18 +3566,41 @@ static int binder_thread_write(struct bi
buffer->transaction = NULL;
}
if (buffer->async_transaction && buffer->target_node) {
- BUG_ON(!buffer->target_node->has_async_transaction);
- if (list_empty(&buffer->target_node->async_todo))
- buffer->target_node->has_async_transaction = 0;
- else
- list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
+ struct binder_node *buf_node;
+ struct binder_work *w;
+
+ buf_node = buffer->target_node;
+ binder_node_inner_lock(buf_node);
+ BUG_ON(!buf_node->has_async_transaction);
+ BUG_ON(buf_node->proc != proc);
+ w = binder_dequeue_work_head_ilocked(
+ &buf_node->async_todo);
+ if (!w) {
+ buf_node->has_async_transaction = 0;
+ } else {
+ binder_enqueue_work_ilocked(
+ w, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
+ }
+ binder_node_inner_unlock(buf_node);
}
trace_binder_transaction_buffer_release(buffer);
binder_transaction_buffer_release(proc, buffer, NULL);
- binder_free_buf(proc, buffer);
+ binder_alloc_free_buf(&proc->alloc, buffer);
break;
}

+ case BC_TRANSACTION_SG:
+ case BC_REPLY_SG: {
+ struct binder_transaction_data_sg tr;
+
+ if (copy_from_user(&tr, ptr, sizeof(tr)))
+ return -EFAULT;
+ ptr += sizeof(tr);
+ binder_transaction(proc, thread, &tr.transaction_data,
+ cmd == BC_REPLY_SG, tr.buffers_size);
+ break;
+ }
case BC_TRANSACTION:
case BC_REPLY: {
struct binder_transaction_data tr;
@@ -1964,7 +3608,8 @@ static int binder_thread_write(struct bi
if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
- binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
+ binder_transaction(proc, thread, &tr,
+ cmd == BC_REPLY, 0);
break;
}

@@ -1972,6 +3617,7 @@ static int binder_thread_write(struct bi
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BC_REGISTER_LOOPER\n",
proc->pid, thread->pid);
+ binder_inner_proc_lock(proc);
if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
thread->looper |= BINDER_LOOPER_STATE_INVALID;
binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
@@ -1985,6 +3631,7 @@ static int binder_thread_write(struct bi
proc->requested_threads_started++;
}
thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
+ binder_inner_proc_unlock(proc);
break;
case BC_ENTER_LOOPER:
binder_debug(BINDER_DEBUG_THREADS,
@@ -2009,7 +3656,7 @@ static int binder_thread_write(struct bi
uint32_t target;
binder_uintptr_t cookie;
struct binder_ref *ref;
- struct binder_ref_death *death;
+ struct binder_ref_death *death = NULL;

if (get_user(target, (uint32_t __user *)ptr))
return -EFAULT;
@@ -2017,7 +3664,29 @@ static int binder_thread_write(struct bi
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- ref = binder_get_ref(proc, target, false);
+ if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
+ /*
+ * Allocate memory for death notification
+ * before taking lock
+ */
+ death = kzalloc(sizeof(*death), GFP_KERNEL);
+ if (death == NULL) {
+ WARN_ON(thread->return_error.cmd !=
+ BR_OK);
+ thread->return_error.cmd = BR_ERROR;
+ binder_enqueue_work(
+ thread->proc,
+ &thread->return_error.work,
+ &thread->todo);
+ binder_debug(
+ BINDER_DEBUG_FAILED_TRANSACTION,
+ "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
+ proc->pid, thread->pid);
+ break;
+ }
+ }
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, target, false);
if (ref == NULL) {
binder_user_error("%d:%d %s invalid ref %d\n",
proc->pid, thread->pid,
@@ -2025,6 +3694,8 @@ static int binder_thread_write(struct bi
"BC_REQUEST_DEATH_NOTIFICATION" :
"BC_CLEAR_DEATH_NOTIFICATION",
target);
+ binder_proc_unlock(proc);
+ kfree(death);
break;
}

@@ -2034,21 +3705,18 @@ static int binder_thread_write(struct bi
cmd == BC_REQUEST_DEATH_NOTIFICATION ?
"BC_REQUEST_DEATH_NOTIFICATION" :
"BC_CLEAR_DEATH_NOTIFICATION",
- (u64)cookie, ref->debug_id, ref->desc,
- ref->strong, ref->weak, ref->node->debug_id);
+ (u64)cookie, ref->data.debug_id,
+ ref->data.desc, ref->data.strong,
+ ref->data.weak, ref->node->debug_id);

+ binder_node_lock(ref->node);
if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
if (ref->death) {
binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
proc->pid, thread->pid);
- break;
- }
- death = kzalloc(sizeof(*death), GFP_KERNEL);
- if (death == NULL) {
- thread->return_error = BR_ERROR;
- binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
- proc->pid, thread->pid);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ kfree(death);
break;
}
binder_stats_created(BINDER_STAT_DEATH);
@@ -2057,17 +3725,19 @@ static int binder_thread_write(struct bi
ref->death = death;
if (ref->node->proc == NULL) {
ref->death->work.type = BINDER_WORK_DEAD_BINDER;
- if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
- list_add_tail(&ref->death->work.entry, &thread->todo);
- } else {
- list_add_tail(&ref->death->work.entry, &proc->todo);
- wake_up_interruptible(&proc->wait);
- }
+
+ binder_inner_proc_lock(proc);
+ binder_enqueue_work_ilocked(
+ &ref->death->work, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
+ binder_inner_proc_unlock(proc);
}
} else {
if (ref->death == NULL) {
binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
proc->pid, thread->pid);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
break;
}
death = ref->death;
@@ -2076,22 +3746,35 @@ static int binder_thread_write(struct bi
proc->pid, thread->pid,
(u64)death->cookie,
(u64)cookie);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
break;
}
ref->death = NULL;
+ binder_inner_proc_lock(proc);
if (list_empty(&death->work.entry)) {
death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
- if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
- list_add_tail(&death->work.entry, &thread->todo);
- } else {
- list_add_tail(&death->work.entry, &proc->todo);
- wake_up_interruptible(&proc->wait);
+ if (thread->looper &
+ (BINDER_LOOPER_STATE_REGISTERED |
+ BINDER_LOOPER_STATE_ENTERED))
+ binder_enqueue_work_ilocked(
+ &death->work,
+ &thread->todo);
+ else {
+ binder_enqueue_work_ilocked(
+ &death->work,
+ &proc->todo);
+ binder_wakeup_proc_ilocked(
+ proc);
}
} else {
BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
}
+ binder_inner_proc_unlock(proc);
}
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
} break;
case BC_DEAD_BINDER_DONE: {
struct binder_work *w;
@@ -2102,8 +3785,13 @@ static int binder_thread_write(struct bi
return -EFAULT;

ptr += sizeof(cookie);
- list_for_each_entry(w, &proc->delivered_death, entry) {
- struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
+ binder_inner_proc_lock(proc);
+ list_for_each_entry(w, &proc->delivered_death,
+ entry) {
+ struct binder_ref_death *tmp_death =
+ container_of(w,
+ struct binder_ref_death,
+ work);

if (tmp_death->cookie == cookie) {
death = tmp_death;
@@ -2117,19 +3805,25 @@ static int binder_thread_write(struct bi
if (death == NULL) {
binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
proc->pid, thread->pid, (u64)cookie);
+ binder_inner_proc_unlock(proc);
break;
}
-
- list_del_init(&death->work.entry);
+ binder_dequeue_work_ilocked(&death->work);
if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
- if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
- list_add_tail(&death->work.entry, &thread->todo);
- } else {
- list_add_tail(&death->work.entry, &proc->todo);
- wake_up_interruptible(&proc->wait);
+ if (thread->looper &
+ (BINDER_LOOPER_STATE_REGISTERED |
+ BINDER_LOOPER_STATE_ENTERED))
+ binder_enqueue_work_ilocked(
+ &death->work, &thread->todo);
+ else {
+ binder_enqueue_work_ilocked(
+ &death->work,
+ &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
}
}
+ binder_inner_proc_unlock(proc);
} break;

default:
@@ -2147,23 +3841,73 @@ static void binder_stat_br(struct binder
{
trace_binder_return(cmd);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
- binder_stats.br[_IOC_NR(cmd)]++;
- proc->stats.br[_IOC_NR(cmd)]++;
- thread->stats.br[_IOC_NR(cmd)]++;
+ atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
+ atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
+ atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
}
}

-static int binder_has_proc_work(struct binder_proc *proc,
- struct binder_thread *thread)
+static int binder_put_node_cmd(struct binder_proc *proc,
+ struct binder_thread *thread,
+ void __user **ptrp,
+ binder_uintptr_t node_ptr,
+ binder_uintptr_t node_cookie,
+ int node_debug_id,
+ uint32_t cmd, const char *cmd_name)
{
- return !list_empty(&proc->todo) ||
- (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+ void __user *ptr = *ptrp;
+
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+
+ if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(binder_uintptr_t);
+
+ if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(binder_uintptr_t);
+
+ binder_stat_br(proc, thread, cmd);
+ binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
+ proc->pid, thread->pid, cmd_name, node_debug_id,
+ (u64)node_ptr, (u64)node_cookie);
+
+ *ptrp = ptr;
+ return 0;
}

-static int binder_has_thread_work(struct binder_thread *thread)
+static int binder_wait_for_work(struct binder_thread *thread,
+ bool do_proc_work)
{
- return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
- (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+ DEFINE_WAIT(wait);
+ struct binder_proc *proc = thread->proc;
+ int ret = 0;
+
+ freezer_do_not_count();
+ binder_inner_proc_lock(proc);
+ for (;;) {
+ prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
+ if (binder_has_work_ilocked(thread, do_proc_work))
+ break;
+ if (do_proc_work)
+ list_add(&thread->waiting_thread_node,
+ &proc->waiting_threads);
+ binder_inner_proc_unlock(proc);
+ schedule();
+ binder_inner_proc_lock(proc);
+ list_del_init(&thread->waiting_thread_node);
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ }
+ finish_wait(&thread->wait, &wait);
+ binder_inner_proc_unlock(proc);
+ freezer_count();
+
+ return ret;
}

static int binder_thread_read(struct binder_proc *proc,
@@ -2185,37 +3929,15 @@ static int binder_thread_read(struct bin
}

retry:
- wait_for_proc_work = thread->transaction_stack == NULL &&
- list_empty(&thread->todo);
-
- if (thread->return_error != BR_OK && ptr < end) {
- if (thread->return_error2 != BR_OK) {
- if (put_user(thread->return_error2, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- binder_stat_br(proc, thread, thread->return_error2);
- if (ptr == end)
- goto done;
- thread->return_error2 = BR_OK;
- }
- if (put_user(thread->return_error, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- binder_stat_br(proc, thread, thread->return_error);
- thread->return_error = BR_OK;
- goto done;
- }
-
+ binder_inner_proc_lock(proc);
+ wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
+ binder_inner_proc_unlock(proc);

thread->looper |= BINDER_LOOPER_STATE_WAITING;
- if (wait_for_proc_work)
- proc->ready_threads++;
-
- binder_unlock(__func__);

trace_binder_wait_for_work(wait_for_proc_work,
!!thread->transaction_stack,
- !list_empty(&thread->todo));
+ !binder_worklist_empty(proc, &thread->todo));
if (wait_for_proc_work) {
if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))) {
@@ -2224,24 +3946,16 @@ retry:
wait_event_interruptible(binder_user_error_wait,
binder_stop_on_user_error < 2);
}
- binder_set_nice(proc->default_priority);
- if (non_block) {
- if (!binder_has_proc_work(proc, thread))
- ret = -EAGAIN;
- } else
- ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
- } else {
- if (non_block) {
- if (!binder_has_thread_work(thread))
- ret = -EAGAIN;
- } else
- ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
+ binder_restore_priority(current, proc->default_priority);
}

- binder_lock(__func__);
+ if (non_block) {
+ if (!binder_has_work(thread, wait_for_proc_work))
+ ret = -EAGAIN;
+ } else {
+ ret = binder_wait_for_work(thread, wait_for_proc_work);
+ }

- if (wait_for_proc_work)
- proc->ready_threads--;
thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

if (ret)
@@ -2250,31 +3964,52 @@ retry:
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
- struct binder_work *w;
+ struct binder_work *w = NULL;
+ struct list_head *list = NULL;
struct binder_transaction *t = NULL;
+ struct binder_thread *t_from;
+
+ binder_inner_proc_lock(proc);
+ if (!binder_worklist_empty_ilocked(&thread->todo))
+ list = &thread->todo;
+ else if (!binder_worklist_empty_ilocked(&proc->todo) &&
+ wait_for_proc_work)
+ list = &proc->todo;
+ else {
+ binder_inner_proc_unlock(proc);

- if (!list_empty(&thread->todo)) {
- w = list_first_entry(&thread->todo, struct binder_work,
- entry);
- } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
- w = list_first_entry(&proc->todo, struct binder_work,
- entry);
- } else {
/* no data added */
- if (ptr - buffer == 4 &&
- !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
+ if (ptr - buffer == 4 && !thread->looper_need_return)
goto retry;
break;
}

- if (end - ptr < sizeof(tr) + 4)
+ if (end - ptr < sizeof(tr) + 4) {
+ binder_inner_proc_unlock(proc);
break;
+ }
+ w = binder_dequeue_work_head_ilocked(list);

switch (w->type) {
case BINDER_WORK_TRANSACTION: {
+ binder_inner_proc_unlock(proc);
t = container_of(w, struct binder_transaction, work);
} break;
+ case BINDER_WORK_RETURN_ERROR: {
+ struct binder_error *e = container_of(
+ w, struct binder_error, work);
+
+ WARN_ON(e->cmd == BR_OK);
+ binder_inner_proc_unlock(proc);
+ if (put_user(e->cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ e->cmd = BR_OK;
+ ptr += sizeof(uint32_t);
+
+ binder_stat_br(proc, thread, cmd);
+ } break;
case BINDER_WORK_TRANSACTION_COMPLETE: {
+ binder_inner_proc_unlock(proc);
cmd = BR_TRANSACTION_COMPLETE;
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
@@ -2284,113 +4019,134 @@ retry:
binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
"%d:%d BR_TRANSACTION_COMPLETE\n",
proc->pid, thread->pid);
-
- list_del(&w->entry);
kfree(w);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
} break;
case BINDER_WORK_NODE: {
struct binder_node *node = container_of(w, struct binder_node, work);
- uint32_t cmd = BR_NOOP;
- const char *cmd_name;
- int strong = node->internal_strong_refs || node->local_strong_refs;
- int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
+ int strong, weak;
+ binder_uintptr_t node_ptr = node->ptr;
+ binder_uintptr_t node_cookie = node->cookie;
+ int node_debug_id = node->debug_id;
+ int has_weak_ref;
+ int has_strong_ref;
+ void __user *orig_ptr = ptr;

- if (weak && !node->has_weak_ref) {
- cmd = BR_INCREFS;
- cmd_name = "BR_INCREFS";
+ BUG_ON(proc != node->proc);
+ strong = node->internal_strong_refs ||
+ node->local_strong_refs;
+ weak = !hlist_empty(&node->refs) ||
+ node->local_weak_refs ||
+ node->tmp_refs || strong;
+ has_strong_ref = node->has_strong_ref;
+ has_weak_ref = node->has_weak_ref;
+
+ if (weak && !has_weak_ref) {
node->has_weak_ref = 1;
node->pending_weak_ref = 1;
node->local_weak_refs++;
- } else if (strong && !node->has_strong_ref) {
- cmd = BR_ACQUIRE;
- cmd_name = "BR_ACQUIRE";
+ }
+ if (strong && !has_strong_ref) {
node->has_strong_ref = 1;
node->pending_strong_ref = 1;
node->local_strong_refs++;
- } else if (!strong && node->has_strong_ref) {
- cmd = BR_RELEASE;
- cmd_name = "BR_RELEASE";
+ }
+ if (!strong && has_strong_ref)
node->has_strong_ref = 0;
- } else if (!weak && node->has_weak_ref) {
- cmd = BR_DECREFS;
- cmd_name = "BR_DECREFS";
+ if (!weak && has_weak_ref)
node->has_weak_ref = 0;
- }
- if (cmd != BR_NOOP) {
- if (put_user(cmd, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- if (put_user(node->ptr,
- (binder_uintptr_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(binder_uintptr_t);
- if (put_user(node->cookie,
- (binder_uintptr_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(binder_uintptr_t);
+ if (!weak && !strong) {
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "%d:%d node %d u%016llx c%016llx deleted\n",
+ proc->pid, thread->pid,
+ node_debug_id,
+ (u64)node_ptr,
+ (u64)node_cookie);
+ rb_erase(&node->rb_node, &proc->nodes);
+ binder_inner_proc_unlock(proc);
+ binder_node_lock(node);
+ /*
+ * Acquire the node lock before freeing the
+ * node to serialize with other threads that
+ * may have been holding the node lock while
+ * decrementing this node (avoids race where
+ * this thread frees while the other thread
+ * is unlocking the node after the final
+ * decrement)
+ */
+ binder_node_unlock(node);
+ binder_free_node(node);
+ } else
+ binder_inner_proc_unlock(proc);

- binder_stat_br(proc, thread, cmd);
- binder_debug(BINDER_DEBUG_USER_REFS,
- "%d:%d %s %d u%016llx c%016llx\n",
- proc->pid, thread->pid, cmd_name,
- node->debug_id,
- (u64)node->ptr, (u64)node->cookie);
- } else {
- list_del_init(&w->entry);
- if (!weak && !strong) {
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d:%d node %d u%016llx c%016llx deleted\n",
- proc->pid, thread->pid,
- node->debug_id,
- (u64)node->ptr,
- (u64)node->cookie);
- rb_erase(&node->rb_node, &proc->nodes);
- kfree(node);
- binder_stats_deleted(BINDER_STAT_NODE);
- } else {
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d:%d node %d u%016llx c%016llx state unchanged\n",
- proc->pid, thread->pid,
- node->debug_id,
- (u64)node->ptr,
- (u64)node->cookie);
- }
- }
+ if (weak && !has_weak_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_INCREFS, "BR_INCREFS");
+ if (!ret && strong && !has_strong_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_ACQUIRE, "BR_ACQUIRE");
+ if (!ret && !strong && has_strong_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_RELEASE, "BR_RELEASE");
+ if (!ret && !weak && has_weak_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_DECREFS, "BR_DECREFS");
+ if (orig_ptr == ptr)
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "%d:%d node %d u%016llx c%016llx state unchanged\n",
+ proc->pid, thread->pid,
+ node_debug_id,
+ (u64)node_ptr,
+ (u64)node_cookie);
+ if (ret)
+ return ret;
} break;
case BINDER_WORK_DEAD_BINDER:
case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
struct binder_ref_death *death;
uint32_t cmd;
+ binder_uintptr_t cookie;

death = container_of(w, struct binder_ref_death, work);
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
else
cmd = BR_DEAD_BINDER;
- if (put_user(cmd, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- if (put_user(death->cookie,
- (binder_uintptr_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(binder_uintptr_t);
- binder_stat_br(proc, thread, cmd);
+ cookie = death->cookie;
+
binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
"%d:%d %s %016llx\n",
proc->pid, thread->pid,
cmd == BR_DEAD_BINDER ?
"BR_DEAD_BINDER" :
"BR_CLEAR_DEATH_NOTIFICATION_DONE",
- (u64)death->cookie);
-
+ (u64)cookie);
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
- list_del(&w->entry);
+ binder_inner_proc_unlock(proc);
kfree(death);
binder_stats_deleted(BINDER_STAT_DEATH);
- } else
- list_move(&w->entry, &proc->delivered_death);
+ } else {
+ binder_enqueue_work_ilocked(
+ w, &proc->delivered_death);
+ binder_inner_proc_unlock(proc);
+ }
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (put_user(cookie,
+ (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(binder_uintptr_t);
+ binder_stat_br(proc, thread, cmd);
if (cmd == BR_DEAD_BINDER)
goto done; /* DEAD_BINDER notifications can cause transactions */
} break;
@@ -2402,16 +4158,14 @@ retry:
BUG_ON(t->buffer == NULL);
if (t->buffer->target_node) {
struct binder_node *target_node = t->buffer->target_node;
+ struct binder_priority node_prio;

tr.target.ptr = target_node->ptr;
tr.cookie = target_node->cookie;
- t->saved_priority = task_nice(current);
- if (t->priority < target_node->min_priority &&
- !(t->flags & TF_ONE_WAY))
- binder_set_nice(t->priority);
- else if (!(t->flags & TF_ONE_WAY) ||
- t->saved_priority > target_node->min_priority)
- binder_set_nice(target_node->min_priority);
+ node_prio.sched_policy = target_node->sched_policy;
+ node_prio.prio = target_node->min_priority;
+ binder_transaction_priority(current, t, node_prio,
+ target_node->inherit_rt);
cmd = BR_TRANSACTION;
} else {
tr.target.ptr = 0;
@@ -2422,8 +4176,9 @@ retry:
tr.flags = t->flags;
tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

- if (t->from) {
- struct task_struct *sender = t->from->proc->tsk;
+ t_from = binder_get_txn_from(t);
+ if (t_from) {
+ struct task_struct *sender = t_from->proc->tsk;

tr.sender_pid = task_tgid_nr_ns(sender,
task_active_pid_ns(current));
@@ -2433,18 +4188,24 @@ retry:

tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
- tr.data.ptr.buffer = (binder_uintptr_t)(
- (uintptr_t)t->buffer->data +
- proc->user_buffer_offset);
+ tr.data.ptr.buffer = (binder_uintptr_t)
+ ((uintptr_t)t->buffer->data +
+ binder_alloc_get_user_buffer_offset(&proc->alloc));
tr.data.ptr.offsets = tr.data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));

- if (put_user(cmd, (uint32_t __user *)ptr))
+ if (put_user(cmd, (uint32_t __user *)ptr)) {
+ if (t_from)
+ binder_thread_dec_tmpref(t_from);
return -EFAULT;
+ }
ptr += sizeof(uint32_t);
- if (copy_to_user(ptr, &tr, sizeof(tr)))
+ if (copy_to_user(ptr, &tr, sizeof(tr))) {
+ if (t_from)
+ binder_thread_dec_tmpref(t_from);
return -EFAULT;
+ }
ptr += sizeof(tr);

trace_binder_transaction_received(t);
@@ -2454,21 +4215,22 @@ retry:
proc->pid, thread->pid,
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
"BR_REPLY",
- t->debug_id, t->from ? t->from->proc->pid : 0,
- t->from ? t->from->pid : 0, cmd,
+ t->debug_id, t_from ? t_from->proc->pid : 0,
+ t_from ? t_from->pid : 0, cmd,
t->buffer->data_size, t->buffer->offsets_size,
(u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);

- list_del(&t->work.entry);
+ if (t_from)
+ binder_thread_dec_tmpref(t_from);
t->buffer->allow_user_free = 1;
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
+ binder_inner_proc_lock(thread->proc);
t->to_parent = thread->transaction_stack;
t->to_thread = thread;
thread->transaction_stack = t;
+ binder_inner_proc_unlock(thread->proc);
} else {
- t->buffer->transaction = NULL;
- kfree(t);
- binder_stats_deleted(BINDER_STAT_TRANSACTION);
+ binder_free_transaction(t);
}
break;
}
@@ -2476,29 +4238,36 @@ retry:
done:

*consumed = ptr - buffer;
- if (proc->requested_threads + proc->ready_threads == 0 &&
+ binder_inner_proc_lock(proc);
+ if (proc->requested_threads == 0 &&
+ list_empty(&thread->proc->waiting_threads) &&
proc->requested_threads_started < proc->max_threads &&
(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
/*spawn a new thread if we leave this out */) {
proc->requested_threads++;
+ binder_inner_proc_unlock(proc);
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BR_SPAWN_LOOPER\n",
proc->pid, thread->pid);
if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
return -EFAULT;
binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
- }
+ } else
+ binder_inner_proc_unlock(proc);
return 0;
}

-static void binder_release_work(struct list_head *list)
+static void binder_release_work(struct binder_proc *proc,
+ struct list_head *list)
{
struct binder_work *w;

- while (!list_empty(list)) {
- w = list_first_entry(list, struct binder_work, entry);
- list_del_init(&w->entry);
+ while (1) {
+ w = binder_dequeue_work_head(proc, list);
+ if (!w)
+ return;
+
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
struct binder_transaction *t;
@@ -2511,11 +4280,17 @@ static void binder_release_work(struct l
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered transaction %d\n",
t->debug_id);
- t->buffer->transaction = NULL;
- kfree(t);
- binder_stats_deleted(BINDER_STAT_TRANSACTION);
+ binder_free_transaction(t);
}
} break;
+ case BINDER_WORK_RETURN_ERROR: {
+ struct binder_error *e = container_of(
+ w, struct binder_error, work);
+
+ binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+ "undelivered TRANSACTION_ERROR: %u\n",
+ e->cmd);
+ } break;
case BINDER_WORK_TRANSACTION_COMPLETE: {
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered TRANSACTION_COMPLETE\n");
@@ -2542,7 +4317,8 @@ static void binder_release_work(struct l

}

-static struct binder_thread *binder_get_thread(struct binder_proc *proc)
+static struct binder_thread *binder_get_thread_ilocked(
+ struct binder_proc *proc, struct binder_thread *new_thread)
{
struct binder_thread *thread = NULL;
struct rb_node *parent = NULL;
@@ -2557,38 +4333,102 @@ static struct binder_thread *binder_get_
else if (current->pid > thread->pid)
p = &(*p)->rb_right;
else
- break;
+ return thread;
}
- if (*p == NULL) {
- thread = kzalloc(sizeof(*thread), GFP_KERNEL);
- if (thread == NULL)
+ if (!new_thread)
+ return NULL;
+ thread = new_thread;
+ binder_stats_created(BINDER_STAT_THREAD);
+ thread->proc = proc;
+ thread->pid = current->pid;
+ get_task_struct(current);
+ thread->task = current;
+ atomic_set(&thread->tmp_ref, 0);
+ init_waitqueue_head(&thread->wait);
+ INIT_LIST_HEAD(&thread->todo);
+ rb_link_node(&thread->rb_node, parent, p);
+ rb_insert_color(&thread->rb_node, &proc->threads);
+ thread->looper_need_return = true;
+ thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
+ thread->return_error.cmd = BR_OK;
|
|
+ thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
|
|
+ thread->reply_error.cmd = BR_OK;
|
|
+ INIT_LIST_HEAD(&new_thread->waiting_thread_node);
|
|
+ return thread;
|
|
+}
|
|
+
|
|
+static struct binder_thread *binder_get_thread(struct binder_proc *proc)
|
|
+{
|
|
+ struct binder_thread *thread;
|
|
+ struct binder_thread *new_thread;
|
|
+
|
|
+ binder_inner_proc_lock(proc);
|
|
+ thread = binder_get_thread_ilocked(proc, NULL);
|
|
+ binder_inner_proc_unlock(proc);
|
|
+ if (!thread) {
|
|
+ new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
|
|
+ if (new_thread == NULL)
|
|
return NULL;
|
|
- binder_stats_created(BINDER_STAT_THREAD);
|
|
- thread->proc = proc;
|
|
- thread->pid = current->pid;
|
|
- init_waitqueue_head(&thread->wait);
|
|
- INIT_LIST_HEAD(&thread->todo);
|
|
- rb_link_node(&thread->rb_node, parent, p);
|
|
- rb_insert_color(&thread->rb_node, &proc->threads);
|
|
- thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
|
|
- thread->return_error = BR_OK;
|
|
- thread->return_error2 = BR_OK;
|
|
+ binder_inner_proc_lock(proc);
|
|
+ thread = binder_get_thread_ilocked(proc, new_thread);
|
|
+ binder_inner_proc_unlock(proc);
|
|
+ if (thread != new_thread)
|
|
+ kfree(new_thread);
|
|
}
|
|
return thread;
|
|
}
|
|
|
|
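The reworked binder_get_thread() above illustrates a standard kernel idiom: look up under the spinlock, allocate with GFP_KERNEL outside it (kzalloc may sleep, which is forbidden while a spinlock is held), then redo the lookup-and-insert under the lock and free the allocation if another thread won the race. A minimal sketch of the same pattern, with hypothetical tree/item types and helpers, not code from this patch:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct tree {
	spinlock_t lock;
	/* container state elided */
};

struct item {
	int key;
	/* payload elided */
};

/* hypothetical: return match for key; insert new_it on a miss if non-NULL */
struct item *tree_find_or_insert_locked(struct tree *t, int key,
					struct item *new_it);

struct item *item_get(struct tree *t, int key)
{
	struct item *it, *new_it;

	spin_lock(&t->lock);
	it = tree_find_or_insert_locked(t, key, NULL);	/* lookup only */
	spin_unlock(&t->lock);
	if (it)
		return it;

	new_it = kzalloc(sizeof(*new_it), GFP_KERNEL);	/* may sleep */
	if (!new_it)
		return NULL;

	spin_lock(&t->lock);
	it = tree_find_or_insert_locked(t, key, new_it);
	spin_unlock(&t->lock);
	if (it != new_it)
		kfree(new_it);	/* raced; another thread inserted first */
	return it;
}
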
-static int binder_free_thread(struct binder_proc *proc,
- struct binder_thread *thread)
+static void binder_free_proc(struct binder_proc *proc)
+{
+ BUG_ON(!list_empty(&proc->todo));
+ BUG_ON(!list_empty(&proc->delivered_death));
+ binder_alloc_deferred_release(&proc->alloc);
+ put_task_struct(proc->tsk);
+ binder_stats_deleted(BINDER_STAT_PROC);
+ kfree(proc);
+}
+
+static void binder_free_thread(struct binder_thread *thread)
+{
+ BUG_ON(!list_empty(&thread->todo));
+ binder_stats_deleted(BINDER_STAT_THREAD);
+ binder_proc_dec_tmpref(thread->proc);
+ put_task_struct(thread->task);
+ kfree(thread);
+}
+
+static int binder_thread_release(struct binder_proc *proc,
+ struct binder_thread *thread)
{
struct binder_transaction *t;
struct binder_transaction *send_reply = NULL;
int active_transactions = 0;
+ struct binder_transaction *last_t = NULL;

+ binder_inner_proc_lock(thread->proc);
+ /*
+ * take a ref on the proc so it survives
+ * after we remove this thread from proc->threads.
+ * The corresponding dec is when we actually
+ * free the thread in binder_free_thread()
+ */
+ proc->tmp_ref++;
+ /*
+ * take a ref on this thread to ensure it
+ * survives while we are releasing it
+ */
+ atomic_inc(&thread->tmp_ref);
rb_erase(&thread->rb_node, &proc->threads);
t = thread->transaction_stack;
- if (t && t->to_thread == thread)
- send_reply = t;
+ if (t) {
+ spin_lock(&t->lock);
+ if (t->to_thread == thread)
+ send_reply = t;
+ }
+ thread->is_dead = true;
+
while (t) {
+ last_t = t;
active_transactions++;
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"release %d:%d transaction %d %s, still active\n",
@@ -2609,12 +4449,16 @@ static int binder_free_thread(struct bin
t = t->from_parent;
} else
BUG();
+ spin_unlock(&last_t->lock);
+ if (t)
+ spin_lock(&t->lock);
}
+ binder_inner_proc_unlock(thread->proc);
+
if (send_reply)
binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
- binder_release_work(&thread->todo);
- kfree(thread);
- binder_stats_deleted(BINDER_STAT_THREAD);
+ binder_release_work(proc, &thread->todo);
+ binder_thread_dec_tmpref(thread);
return active_transactions;
}

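binder_thread_release() pins both the proc and the thread with temporary reference counts (tmp_ref) so that neither can be freed while the spinlocks are dropped; binder_thread_dec_tmpref() later releases the pin and frees the object once it is both dead and unreferenced. The idiom in miniature, with illustrative types rather than the driver's own:

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct obj {
	spinlock_t lock;
	int tmp_ref;
	bool is_dead;
};

static void obj_get_tmpref(struct obj *o)
{
	spin_lock(&o->lock);
	o->tmp_ref++;		/* pin object across dropped locks */
	spin_unlock(&o->lock);
}

static void obj_put_tmpref(struct obj *o)
{
	bool free_it;

	spin_lock(&o->lock);
	o->tmp_ref--;
	free_it = o->is_dead && o->tmp_ref == 0;
	spin_unlock(&o->lock);
	if (free_it)
		kfree(o);	/* last reference to a dead object frees it */
}
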
@@ -2623,34 +4467,23 @@ static unsigned int binder_poll(struct f
{
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread = NULL;
- int wait_for_proc_work;
-
- binder_lock(__func__);
+ bool wait_for_proc_work;

thread = binder_get_thread(proc);
- if (!thread) {
- binder_unlock(__func__);
+ if (!thread)
return POLLERR;
- }

- wait_for_proc_work = thread->transaction_stack == NULL &&
- list_empty(&thread->todo) && thread->return_error == BR_OK;
+ binder_inner_proc_lock(thread->proc);
+ thread->looper |= BINDER_LOOPER_STATE_POLL;
+ wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);

- binder_unlock(__func__);
+ binder_inner_proc_unlock(thread->proc);
+
+ poll_wait(filp, &thread->wait, wait);
+
+ if (binder_has_work(thread, wait_for_proc_work))
+ return POLLIN;

- if (wait_for_proc_work) {
- if (binder_has_proc_work(proc, thread))
- return POLLIN;
- poll_wait(filp, &proc->wait, wait);
- if (binder_has_proc_work(proc, thread))
- return POLLIN;
- } else {
- if (binder_has_thread_work(thread))
- return POLLIN;
- poll_wait(filp, &thread->wait, wait);
- if (binder_has_thread_work(thread))
- return POLLIN;
- }
return 0;
}

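After the rework a single poll_wait() on the calling thread's waitqueue replaces the old proc/thread split, and BINDER_LOOPER_STATE_POLL records that the thread entered the driver via poll. The user-space contract is unchanged; a minimal illustrative consumer:

#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

int wait_for_binder_work(void)
{
	struct pollfd pfd;
	int ret;
	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);

	if (fd < 0)
		return -1;
	pfd.fd = fd;
	pfd.events = POLLIN;
	ret = poll(&pfd, 1, -1);	/* blocks until work is pending */
	close(fd);
	return ret < 0 ? -1 : 0;
}
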
@@ -2697,8 +4530,10 @@ static int binder_ioctl_write_read(struc
&bwr.read_consumed,
filp->f_flags & O_NONBLOCK);
trace_binder_read_done(ret);
- if (!list_empty(&proc->todo))
- wake_up_interruptible(&proc->wait);
+ binder_inner_proc_lock(proc);
+ if (!binder_worklist_empty_ilocked(&proc->todo))
+ binder_wakeup_proc_ilocked(proc);
+ binder_inner_proc_unlock(proc);
if (ret < 0) {
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
@@ -2722,9 +4557,12 @@ static int binder_ioctl_set_ctx_mgr(stru
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
+ struct binder_context *context = proc->context;
+ struct binder_node *new_node;
kuid_t curr_euid = current_euid();

- if (binder_context_mgr_node != NULL) {
+ mutex_lock(&context->context_mgr_node_lock);
+ if (context->binder_context_mgr_node) {
pr_err("BINDER_SET_CONTEXT_MGR already set\n");
ret = -EBUSY;
goto out;
@@ -2732,31 +4570,60 @@ static int binder_ioctl_set_ctx_mgr(stru
ret = security_binder_set_context_mgr(proc->tsk);
if (ret < 0)
goto out;
- if (uid_valid(binder_context_mgr_uid)) {
- if (!uid_eq(binder_context_mgr_uid, curr_euid)) {
+ if (uid_valid(context->binder_context_mgr_uid)) {
+ if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
from_kuid(&init_user_ns, curr_euid),
from_kuid(&init_user_ns,
- binder_context_mgr_uid));
+ context->binder_context_mgr_uid));
ret = -EPERM;
goto out;
}
} else {
- binder_context_mgr_uid = curr_euid;
+ context->binder_context_mgr_uid = curr_euid;
}
- binder_context_mgr_node = binder_new_node(proc, 0, 0);
- if (binder_context_mgr_node == NULL) {
+ new_node = binder_new_node(proc, NULL);
+ if (!new_node) {
ret = -ENOMEM;
goto out;
}
- binder_context_mgr_node->local_weak_refs++;
- binder_context_mgr_node->local_strong_refs++;
- binder_context_mgr_node->has_strong_ref = 1;
- binder_context_mgr_node->has_weak_ref = 1;
+ binder_node_lock(new_node);
+ new_node->local_weak_refs++;
+ new_node->local_strong_refs++;
+ new_node->has_strong_ref = 1;
+ new_node->has_weak_ref = 1;
+ context->binder_context_mgr_node = new_node;
+ binder_node_unlock(new_node);
+ binder_put_node(new_node);
out:
+ mutex_unlock(&context->context_mgr_node_lock);
return ret;
}

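Only one process per binder context may register as context manager (on Android, servicemanager). The user-space half of this ioctl is a single call; a hedged sketch, assuming the uapi header and omitting error handling:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/android/binder.h>

int become_context_manager(void)
{
	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);

	if (fd < 0)
		return -1;
	/* fails with EBUSY if a manager is already registered */
	return ioctl(fd, BINDER_SET_CONTEXT_MGR, 0);
}
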
+static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
+ struct binder_node_debug_info *info) {
+ struct rb_node *n;
+ binder_uintptr_t ptr = info->ptr;
+
+ memset(info, 0, sizeof(*info));
+
+ binder_inner_proc_lock(proc);
+ for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
+ struct binder_node *node = rb_entry(n, struct binder_node,
+ rb_node);
+ if (node->ptr > ptr) {
+ info->ptr = node->ptr;
+ info->cookie = node->cookie;
+ info->has_strong_ref = node->has_strong_ref;
+ info->has_weak_ref = node->has_weak_ref;
+ break;
+ }
+ }
+ binder_inner_proc_unlock(proc);
+
+ return 0;
+}
+
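The handler returns the first node whose ptr is strictly greater than the one passed in, and zeroes the struct when no such node exists, so user space can enumerate a process's nodes by feeding each result back in. An illustrative walker built on the uapi struct:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

void dump_nodes(int binder_fd)
{
	struct binder_node_debug_info info;

	memset(&info, 0, sizeof(info));
	do {
		if (ioctl(binder_fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
			break;
		if (info.ptr)	/* ptr == 0 means no further nodes */
			printf("node ptr %llx cookie %llx\n",
			       (unsigned long long)info.ptr,
			       (unsigned long long)info.cookie);
	} while (info.ptr);
}
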
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
@@ -2768,13 +4635,14 @@ static long binder_ioctl(struct file *fi
/*pr_info("binder_ioctl: %d:%d %x %lx\n",
proc->pid, current->pid, cmd, arg);*/

+ binder_selftest_alloc(&proc->alloc);
+
trace_binder_ioctl(cmd, arg);

ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret)
goto err_unlocked;

- binder_lock(__func__);
thread = binder_get_thread(proc);
if (thread == NULL) {
ret = -ENOMEM;
@@ -2787,12 +4655,19 @@ static long binder_ioctl(struct file *fi
if (ret)
goto err;
break;
- case BINDER_SET_MAX_THREADS:
- if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
+ case BINDER_SET_MAX_THREADS: {
+ int max_threads;
+
+ if (copy_from_user(&max_threads, ubuf,
+ sizeof(max_threads))) {
ret = -EINVAL;
goto err;
}
+ binder_inner_proc_lock(proc);
+ proc->max_threads = max_threads;
+ binder_inner_proc_unlock(proc);
break;
+ }
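BINDER_SET_MAX_THREADS now copies into a local and publishes it under the inner lock, but the user-space call is unchanged: pass a pointer to the desired pool size. A sketch (the constant is illustrative):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

int set_binder_thread_pool(int binder_fd)
{
	uint32_t max_threads = 15;	/* illustrative pool size */

	return ioctl(binder_fd, BINDER_SET_MAX_THREADS, &max_threads);
}
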
case BINDER_SET_CONTEXT_MGR:
ret = binder_ioctl_set_ctx_mgr(filp);
if (ret)
@@ -2801,7 +4676,7 @@ static long binder_ioctl(struct file *fi
case BINDER_THREAD_EXIT:
binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
proc->pid, thread->pid);
- binder_free_thread(proc, thread);
+ binder_thread_release(proc, thread);
thread = NULL;
break;
case BINDER_VERSION: {
@@ -2818,6 +4693,24 @@ static long binder_ioctl(struct file *fi
}
break;
}
+ case BINDER_GET_NODE_DEBUG_INFO: {
+ struct binder_node_debug_info info;
+
+ if (copy_from_user(&info, ubuf, sizeof(info))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ ret = binder_ioctl_get_node_debug_info(proc, &info);
+ if (ret < 0)
+ goto err;
+
+ if (copy_to_user(ubuf, &info, sizeof(info))) {
+ ret = -EFAULT;
+ goto err;
+ }
+ break;
+ }
default:
ret = -EINVAL;
goto err;
@@ -2825,8 +4718,7 @@ static long binder_ioctl(struct file *fi
ret = 0;
err:
if (thread)
- thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
- binder_unlock(__func__);
+ thread->looper_need_return = false;
wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret && ret != -ERESTARTSYS)
pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
@@ -2855,8 +4747,7 @@ static void binder_vma_close(struct vm_a
proc->pid, vma->vm_start, vma->vm_end,
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
- proc->vma = NULL;
- proc->vma_vm_mm = NULL;
+ binder_alloc_vma_close(&proc->alloc);
binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}

@@ -2874,10 +4765,8 @@ static const struct vm_operations_struct
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
- struct vm_struct *area;
struct binder_proc *proc = filp->private_data;
const char *failure_string;
- struct binder_buffer *buffer;

if (proc->tsk != current->group_leader)
return -EINVAL;
@@ -2886,8 +4775,8 @@ static int binder_mmap(struct file *filp
vma->vm_end = vma->vm_start + SZ_4M;

binder_debug(BINDER_DEBUG_OPEN_CLOSE,
- "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
- proc->pid, vma->vm_start, vma->vm_end,
+ "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
+ __func__, proc->pid, vma->vm_start, vma->vm_end,
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));

@@ -2897,73 +4786,17 @@ static int binder_mmap(struct file *filp
goto err_bad_arg;
}
vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
-
- mutex_lock(&binder_mmap_lock);
- if (proc->buffer) {
- ret = -EBUSY;
- failure_string = "already mapped";
- goto err_already_mapped;
- }
-
- area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
- if (area == NULL) {
- ret = -ENOMEM;
- failure_string = "get_vm_area";
- goto err_get_vm_area_failed;
- }
- proc->buffer = area->addr;
- proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
- mutex_unlock(&binder_mmap_lock);
-
-#ifdef CONFIG_CPU_CACHE_VIPT
- if (cache_is_vipt_aliasing()) {
- while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
- pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
- vma->vm_start += PAGE_SIZE;
- }
- }
-#endif
- proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
- if (proc->pages == NULL) {
- ret = -ENOMEM;
- failure_string = "alloc page array";
- goto err_alloc_pages_failed;
- }
- proc->buffer_size = vma->vm_end - vma->vm_start;
-
vma->vm_ops = &binder_vm_ops;
vma->vm_private_data = proc;

- if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
- ret = -ENOMEM;
- failure_string = "alloc small buf";
- goto err_alloc_small_buf_failed;
- }
- buffer = proc->buffer;
- INIT_LIST_HEAD(&proc->buffers);
- list_add(&buffer->entry, &proc->buffers);
- buffer->free = 1;
- binder_insert_free_buffer(proc, buffer);
- proc->free_async_space = proc->buffer_size / 2;
- barrier();
+ ret = binder_alloc_mmap_handler(&proc->alloc, vma);
+ if (ret)
+ return ret;
+ mutex_lock(&proc->files_lock);
proc->files = get_files_struct(current);
- proc->vma = vma;
- proc->vma_vm_mm = vma->vm_mm;
-
- /*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
- proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
+ mutex_unlock(&proc->files_lock);
return 0;

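All buffer bookkeeping moves into binder_alloc_mmap_handler(), but the mapping contract is unchanged: user space opens the device, then maps a read-only region (capped at 4 MB by the SZ_4M clamp above) into which the driver copies transaction data. Illustrative sketch:

#include <fcntl.h>
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

#define BINDER_VM_SIZE (1024 * 1024)	/* illustrative; kernel caps at 4 MB */

void *map_binder(int *out_fd)
{
	void *base;
	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);

	if (fd < 0)
		return NULL;
	/* read-only: the driver fills the region on our behalf */
	base = mmap(NULL, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE, fd, 0);
	if (base == MAP_FAILED) {
		close(fd);
		return NULL;
	}
	*out_fd = fd;
	return base;
}
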
-err_alloc_small_buf_failed:
- kfree(proc->pages);
- proc->pages = NULL;
-err_alloc_pages_failed:
- mutex_lock(&binder_mmap_lock);
- vfree(proc->buffer);
- proc->buffer = NULL;
-err_get_vm_area_failed:
-err_already_mapped:
- mutex_unlock(&binder_mmap_lock);
err_bad_arg:
pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
@@ -2973,6 +4806,7 @@ err_bad_arg:
static int binder_open(struct inode *nodp, struct file *filp)
{
struct binder_proc *proc;
+ struct binder_device *binder_dev;

binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
current->group_leader->pid, current->pid);
@@ -2980,28 +4814,50 @@ static int binder_open(struct inode *nod
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
if (proc == NULL)
return -ENOMEM;
+ spin_lock_init(&proc->inner_lock);
+ spin_lock_init(&proc->outer_lock);
get_task_struct(current->group_leader);
proc->tsk = current->group_leader;
+ mutex_init(&proc->files_lock);
INIT_LIST_HEAD(&proc->todo);
- init_waitqueue_head(&proc->wait);
- proc->default_priority = task_nice(current);
+ if (binder_supported_policy(current->policy)) {
+ proc->default_priority.sched_policy = current->policy;
+ proc->default_priority.prio = current->normal_prio;
+ } else {
+ proc->default_priority.sched_policy = SCHED_NORMAL;
+ proc->default_priority.prio = NICE_TO_PRIO(0);
+ }

- binder_lock(__func__);
+ binder_dev = container_of(filp->private_data, struct binder_device,
+ miscdev);
+ proc->context = &binder_dev->context;
+ binder_alloc_init(&proc->alloc);

binder_stats_created(BINDER_STAT_PROC);
- hlist_add_head(&proc->proc_node, &binder_procs);
proc->pid = current->group_leader->pid;
INIT_LIST_HEAD(&proc->delivered_death);
+ INIT_LIST_HEAD(&proc->waiting_threads);
filp->private_data = proc;

- binder_unlock(__func__);
+ mutex_lock(&binder_procs_lock);
+ hlist_add_head(&proc->proc_node, &binder_procs);
+ mutex_unlock(&binder_procs_lock);

if (binder_debugfs_dir_entry_proc) {
char strbuf[11];

- snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
+ snprintf(strbuf, sizeof(strbuf), "%d", proc->pid);
+ /*
+ * proc debug entries are shared between contexts, so
+ * this will fail if the process tries to open the driver
+ * again with a different context. The printing code will
+ * anyway print all contexts that a given PID has, so this
+ * is not a problem.
+ */
proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
- binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
+ binder_debugfs_dir_entry_proc,
+ (void *)(unsigned long)proc->pid,
+ &binder_proc_fops);
}

return 0;
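Keying the debugfs entry by PID instead of the proc pointer means any process's binder state can be read back as plain text. A small illustrative reader (assumes debugfs is mounted at /sys/kernel/debug):

#include <stdio.h>

void print_binder_proc_state(int pid)
{
	char path[64], line[256];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/binder/proc/%d", pid);
	f = fopen(path, "r");
	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}
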
@@ -3021,16 +4877,17 @@ static void binder_deferred_flush(struct
struct rb_node *n;
int wake_count = 0;

+ binder_inner_proc_lock(proc);
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

- thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
+ thread->looper_need_return = true;
if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
wake_up_interruptible(&thread->wait);
wake_count++;
}
}
- wake_up_interruptible_all(&proc->wait);
+ binder_inner_proc_unlock(proc);

binder_debug(BINDER_DEBUG_OPEN_CLOSE,
"binder_flush: %d woke %d threads\n", proc->pid,
@@ -3051,13 +4908,21 @@ static int binder_node_release(struct bi
{
struct binder_ref *ref;
int death = 0;
+ struct binder_proc *proc = node->proc;

- list_del_init(&node->work.entry);
- binder_release_work(&node->async_todo);
+ binder_release_work(proc, &node->async_todo);

- if (hlist_empty(&node->refs)) {
- kfree(node);
- binder_stats_deleted(BINDER_STAT_NODE);
+ binder_node_lock(node);
+ binder_inner_proc_lock(proc);
+ binder_dequeue_work_ilocked(&node->work);
+ /*
+ * The caller must have taken a temporary ref on the node,
+ */
+ BUG_ON(!node->tmp_refs);
+ if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(node);
+ binder_free_node(node);

return refs;
}
@@ -3065,59 +4930,84 @@ static int binder_node_release(struct bi
node->proc = NULL;
node->local_strong_refs = 0;
node->local_weak_refs = 0;
+ binder_inner_proc_unlock(proc);
+
+ spin_lock(&binder_dead_nodes_lock);
hlist_add_head(&node->dead_node, &binder_dead_nodes);
+ spin_unlock(&binder_dead_nodes_lock);

hlist_for_each_entry(ref, &node->refs, node_entry) {
refs++;
-
- if (!ref->death)
+ /*
+ * Need the node lock to synchronize
+ * with new notification requests and the
+ * inner lock to synchronize with queued
+ * death notifications.
+ */
+ binder_inner_proc_lock(ref->proc);
+ if (!ref->death) {
+ binder_inner_proc_unlock(ref->proc);
continue;
+ }

death++;

- if (list_empty(&ref->death->work.entry)) {
- ref->death->work.type = BINDER_WORK_DEAD_BINDER;
- list_add_tail(&ref->death->work.entry,
- &ref->proc->todo);
- wake_up_interruptible(&ref->proc->wait);
- } else
- BUG();
+ BUG_ON(!list_empty(&ref->death->work.entry));
+ ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+ binder_enqueue_work_ilocked(&ref->death->work,
+ &ref->proc->todo);
+ binder_wakeup_proc_ilocked(ref->proc);
+ binder_inner_proc_unlock(ref->proc);
}

binder_debug(BINDER_DEBUG_DEAD_BINDER,
"node %d now dead, refs %d, death %d\n",
node->debug_id, refs, death);
+ binder_node_unlock(node);
+ binder_put_node(node);

return refs;
}

static void binder_deferred_release(struct binder_proc *proc)
{
- struct binder_transaction *t;
+ struct binder_context *context = proc->context;
struct rb_node *n;
- int threads, nodes, incoming_refs, outgoing_refs, buffers,
- active_transactions, page_count;
+ int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

- BUG_ON(proc->vma);
BUG_ON(proc->files);

+ mutex_lock(&binder_procs_lock);
hlist_del(&proc->proc_node);
+ mutex_unlock(&binder_procs_lock);

- if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
+ mutex_lock(&context->context_mgr_node_lock);
+ if (context->binder_context_mgr_node &&
+ context->binder_context_mgr_node->proc == proc) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"%s: %d context_mgr_node gone\n",
__func__, proc->pid);
- binder_context_mgr_node = NULL;
+ context->binder_context_mgr_node = NULL;
}
+ mutex_unlock(&context->context_mgr_node_lock);
+ binder_inner_proc_lock(proc);
+ /*
+ * Make sure proc stays alive after we
+ * remove all the threads
+ */
+ proc->tmp_ref++;

+ proc->is_dead = true;
threads = 0;
active_transactions = 0;
while ((n = rb_first(&proc->threads))) {
struct binder_thread *thread;

thread = rb_entry(n, struct binder_thread, rb_node);
+ binder_inner_proc_unlock(proc);
threads++;
- active_transactions += binder_free_thread(proc, thread);
+ active_transactions += binder_thread_release(proc, thread);
+ binder_inner_proc_lock(proc);
}

nodes = 0;
@@ -3127,73 +5017,42 @@ static void binder_deferred_release(stru

node = rb_entry(n, struct binder_node, rb_node);
nodes++;
+ /*
+ * take a temporary ref on the node before
+ * calling binder_node_release() which will either
+ * kfree() the node or call binder_put_node()
+ */
+ binder_inc_node_tmpref_ilocked(node);
rb_erase(&node->rb_node, &proc->nodes);
+ binder_inner_proc_unlock(proc);
incoming_refs = binder_node_release(node, incoming_refs);
+ binder_inner_proc_lock(proc);
}
+ binder_inner_proc_unlock(proc);

outgoing_refs = 0;
+ binder_proc_lock(proc);
while ((n = rb_first(&proc->refs_by_desc))) {
struct binder_ref *ref;

ref = rb_entry(n, struct binder_ref, rb_node_desc);
outgoing_refs++;
- binder_delete_ref(ref);
- }
-
- binder_release_work(&proc->todo);
- binder_release_work(&proc->delivered_death);
-
- buffers = 0;
- while ((n = rb_first(&proc->allocated_buffers))) {
- struct binder_buffer *buffer;
-
- buffer = rb_entry(n, struct binder_buffer, rb_node);
-
- t = buffer->transaction;
- if (t) {
- t->buffer = NULL;
- buffer->transaction = NULL;
- pr_err("release proc %d, transaction %d, not freed\n",
- proc->pid, t->debug_id);
- /*BUG();*/
- }
-
- binder_free_buf(proc, buffer);
- buffers++;
- }
-
- binder_stats_deleted(BINDER_STAT_PROC);
-
- page_count = 0;
- if (proc->pages) {
- int i;
-
- for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
- void *page_addr;
-
- if (!proc->pages[i])
- continue;
-
- page_addr = proc->buffer + i * PAGE_SIZE;
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%s: %d: page %d at %p not freed\n",
- __func__, proc->pid, i, page_addr);
- unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
- __free_page(proc->pages[i]);
- page_count++;
- }
- kfree(proc->pages);
- vfree(proc->buffer);
+ binder_cleanup_ref_olocked(ref);
+ binder_proc_unlock(proc);
+ binder_free_ref(ref);
+ binder_proc_lock(proc);
}
+ binder_proc_unlock(proc);

- put_task_struct(proc->tsk);
+ binder_release_work(proc, &proc->todo);
+ binder_release_work(proc, &proc->delivered_death);

binder_debug(BINDER_DEBUG_OPEN_CLOSE,
- "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
+ "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
__func__, proc->pid, threads, nodes, incoming_refs,
- outgoing_refs, active_transactions, buffers, page_count);
+ outgoing_refs, active_transactions);

- kfree(proc);
+ binder_proc_dec_tmpref(proc);
}

static void binder_deferred_func(struct work_struct *work)
@@ -3204,7 +5063,6 @@ static void binder_deferred_func(struct
int defer;

do {
- binder_lock(__func__);
mutex_lock(&binder_deferred_lock);
if (!hlist_empty(&binder_deferred_list)) {
proc = hlist_entry(binder_deferred_list.first,
@@ -3220,9 +5078,11 @@ static void binder_deferred_func(struct

files = NULL;
if (defer & BINDER_DEFERRED_PUT_FILES) {
+ mutex_lock(&proc->files_lock);
files = proc->files;
if (files)
proc->files = NULL;
+ mutex_unlock(&proc->files_lock);
}

if (defer & BINDER_DEFERRED_FLUSH)
@@ -3231,7 +5091,6 @@ static void binder_deferred_func(struct
if (defer & BINDER_DEFERRED_RELEASE)
binder_deferred_release(proc); /* frees proc */

- binder_unlock(__func__);
if (files)
put_files_struct(files);
} while (proc);
@@ -3251,41 +5110,52 @@ binder_defer_work(struct binder_proc *pr
mutex_unlock(&binder_deferred_lock);
}

-static void print_binder_transaction(struct seq_file *m, const char *prefix,
- struct binder_transaction *t)
+static void print_binder_transaction_ilocked(struct seq_file *m,
+ struct binder_proc *proc,
+ const char *prefix,
+ struct binder_transaction *t)
{
+ struct binder_proc *to_proc;
+ struct binder_buffer *buffer = t->buffer;
+
+ spin_lock(&t->lock);
+ to_proc = t->to_proc;
seq_printf(m,
- "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
+ "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
prefix, t->debug_id, t,
t->from ? t->from->proc->pid : 0,
t->from ? t->from->pid : 0,
- t->to_proc ? t->to_proc->pid : 0,
+ to_proc ? to_proc->pid : 0,
t->to_thread ? t->to_thread->pid : 0,
- t->code, t->flags, t->priority, t->need_reply);
- if (t->buffer == NULL) {
+ t->code, t->flags, t->priority.sched_policy,
+ t->priority.prio, t->need_reply);
+ spin_unlock(&t->lock);
+
+ if (proc != to_proc) {
+ /*
+ * Can only safely deref buffer if we are holding the
+ * correct proc inner lock for this node
+ */
+ seq_puts(m, "\n");
+ return;
+ }
+
+ if (buffer == NULL) {
seq_puts(m, " buffer free\n");
return;
}
- if (t->buffer->target_node)
- seq_printf(m, " node %d",
- t->buffer->target_node->debug_id);
+ if (buffer->target_node)
+ seq_printf(m, " node %d", buffer->target_node->debug_id);
seq_printf(m, " size %zd:%zd data %p\n",
- t->buffer->data_size, t->buffer->offsets_size,
- t->buffer->data);
-}
-
-static void print_binder_buffer(struct seq_file *m, const char *prefix,
- struct binder_buffer *buffer)
-{
- seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
- prefix, buffer->debug_id, buffer->data,
buffer->data_size, buffer->offsets_size,
- buffer->transaction ? "active" : "delivered");
+ buffer->data);
}

-static void print_binder_work(struct seq_file *m, const char *prefix,
- const char *transaction_prefix,
- struct binder_work *w)
+static void print_binder_work_ilocked(struct seq_file *m,
+ struct binder_proc *proc,
+ const char *prefix,
+ const char *transaction_prefix,
+ struct binder_work *w)
{
struct binder_node *node;
struct binder_transaction *t;
@@ -3293,8 +5163,16 @@ static void print_binder_work(struct seq
switch (w->type) {
case BINDER_WORK_TRANSACTION:
t = container_of(w, struct binder_transaction, work);
- print_binder_transaction(m, transaction_prefix, t);
+ print_binder_transaction_ilocked(
+ m, proc, transaction_prefix, t);
break;
+ case BINDER_WORK_RETURN_ERROR: {
+ struct binder_error *e = container_of(
+ w, struct binder_error, work);
+
+ seq_printf(m, "%stransaction error: %u\n",
+ prefix, e->cmd);
+ } break;
case BINDER_WORK_TRANSACTION_COMPLETE:
seq_printf(m, "%stransaction complete\n", prefix);
break;
@@ -3319,40 +5197,46 @@ static void print_binder_work(struct seq
}
}

-static void print_binder_thread(struct seq_file *m,
- struct binder_thread *thread,
- int print_always)
+static void print_binder_thread_ilocked(struct seq_file *m,
+ struct binder_thread *thread,
+ int print_always)
{
struct binder_transaction *t;
struct binder_work *w;
size_t start_pos = m->count;
size_t header_pos;

- seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
+ seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
+ thread->pid, thread->looper,
+ thread->looper_need_return,
+ atomic_read(&thread->tmp_ref));
header_pos = m->count;
t = thread->transaction_stack;
while (t) {
if (t->from == thread) {
- print_binder_transaction(m,
- " outgoing transaction", t);
+ print_binder_transaction_ilocked(m, thread->proc,
+ " outgoing transaction", t);
t = t->from_parent;
} else if (t->to_thread == thread) {
- print_binder_transaction(m,
+ print_binder_transaction_ilocked(m, thread->proc,
" incoming transaction", t);
t = t->to_parent;
} else {
- print_binder_transaction(m, " bad transaction", t);
+ print_binder_transaction_ilocked(m, thread->proc,
+ " bad transaction", t);
t = NULL;
}
}
list_for_each_entry(w, &thread->todo, entry) {
- print_binder_work(m, " ", " pending transaction", w);
+ print_binder_work_ilocked(m, thread->proc, " ",
+ " pending transaction", w);
}
if (!print_always && m->count == header_pos)
m->count = start_pos;
}

-static void print_binder_node(struct seq_file *m, struct binder_node *node)
+static void print_binder_node_nilocked(struct seq_file *m,
+ struct binder_node *node)
{
struct binder_ref *ref;
struct binder_work *w;
@@ -3362,27 +5246,35 @@ static void print_binder_node(struct seq
hlist_for_each_entry(ref, &node->refs, node_entry)
count++;

- seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
+ seq_printf(m, " node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
node->debug_id, (u64)node->ptr, (u64)node->cookie,
+ node->sched_policy, node->min_priority,
node->has_strong_ref, node->has_weak_ref,
node->local_strong_refs, node->local_weak_refs,
- node->internal_strong_refs, count);
+ node->internal_strong_refs, count, node->tmp_refs);
if (count) {
seq_puts(m, " proc");
hlist_for_each_entry(ref, &node->refs, node_entry)
seq_printf(m, " %d", ref->proc->pid);
}
seq_puts(m, "\n");
- list_for_each_entry(w, &node->async_todo, entry)
- print_binder_work(m, " ",
- " pending async transaction", w);
+ if (node->proc) {
+ list_for_each_entry(w, &node->async_todo, entry)
+ print_binder_work_ilocked(m, node->proc, " ",
+ " pending async transaction", w);
+ }
}

-static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
+static void print_binder_ref_olocked(struct seq_file *m,
+ struct binder_ref *ref)
{
- seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n",
- ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
- ref->node->debug_id, ref->strong, ref->weak, ref->death);
+ binder_node_lock(ref->node);
+ seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
+ ref->data.debug_id, ref->data.desc,
+ ref->node->proc ? "" : "dead ",
+ ref->node->debug_id, ref->data.strong,
+ ref->data.weak, ref->death);
+ binder_node_unlock(ref->node);
}

static void print_binder_proc(struct seq_file *m,
@@ -3392,35 +5284,60 @@ static void print_binder_proc(struct seq
struct rb_node *n;
size_t start_pos = m->count;
size_t header_pos;
+ struct binder_node *last_node = NULL;

seq_printf(m, "proc %d\n", proc->pid);
+ seq_printf(m, "context %s\n", proc->context->name);
header_pos = m->count;

+ binder_inner_proc_lock(proc);
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
- print_binder_thread(m, rb_entry(n, struct binder_thread,
+ print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
rb_node), print_all);
+
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
struct binder_node *node = rb_entry(n, struct binder_node,
rb_node);
- if (print_all || node->has_async_transaction)
- print_binder_node(m, node);
+ /*
+ * take a temporary reference on the node so it
+ * survives and isn't removed from the tree
+ * while we print it.
+ */
+ binder_inc_node_tmpref_ilocked(node);
+ /* Need to drop inner lock to take node lock */
+ binder_inner_proc_unlock(proc);
+ if (last_node)
+ binder_put_node(last_node);
+ binder_node_inner_lock(node);
+ print_binder_node_nilocked(m, node);
+ binder_node_inner_unlock(node);
+ last_node = node;
+ binder_inner_proc_lock(proc);
}
+ binder_inner_proc_unlock(proc);
+ if (last_node)
+ binder_put_node(last_node);
+
if (print_all) {
+ binder_proc_lock(proc);
for (n = rb_first(&proc->refs_by_desc);
n != NULL;
n = rb_next(n))
- print_binder_ref(m, rb_entry(n, struct binder_ref,
- rb_node_desc));
+ print_binder_ref_olocked(m, rb_entry(n,
+ struct binder_ref,
+ rb_node_desc));
+ binder_proc_unlock(proc);
}
- for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
- print_binder_buffer(m, " buffer",
- rb_entry(n, struct binder_buffer, rb_node));
+ binder_alloc_print_allocated(m, &proc->alloc);
+ binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->todo, entry)
- print_binder_work(m, " ", " pending transaction", w);
+ print_binder_work_ilocked(m, proc, " ",
+ " pending transaction", w);
list_for_each_entry(w, &proc->delivered_death, entry) {
seq_puts(m, " has delivered dead binder\n");
break;
}
+ binder_inner_proc_unlock(proc);
if (!print_all && m->count == header_pos)
m->count = start_pos;
}
@@ -3463,7 +5380,9 @@ static const char * const binder_command
"BC_EXIT_LOOPER",
"BC_REQUEST_DEATH_NOTIFICATION",
"BC_CLEAR_DEATH_NOTIFICATION",
- "BC_DEAD_BINDER_DONE"
+ "BC_DEAD_BINDER_DONE",
+ "BC_TRANSACTION_SG",
+ "BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
@@ -3484,17 +5403,21 @@ static void print_binder_stats(struct se
BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
ARRAY_SIZE(binder_command_strings));
for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
- if (stats->bc[i])
+ int temp = atomic_read(&stats->bc[i]);
+
+ if (temp)
seq_printf(m, "%s%s: %d\n", prefix,
- binder_command_strings[i], stats->bc[i]);
+ binder_command_strings[i], temp);
}

BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
ARRAY_SIZE(binder_return_strings));
for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
- if (stats->br[i])
+ int temp = atomic_read(&stats->br[i]);
+
+ if (temp)
seq_printf(m, "%s%s: %d\n", prefix,
- binder_return_strings[i], stats->br[i]);
+ binder_return_strings[i], temp);
}

BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
@@ -3502,11 +5425,15 @@ static void print_binder_stats(struct se
BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
ARRAY_SIZE(stats->obj_deleted));
for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
- if (stats->obj_created[i] || stats->obj_deleted[i])
- seq_printf(m, "%s%s: active %d total %d\n", prefix,
+ int created = atomic_read(&stats->obj_created[i]);
+ int deleted = atomic_read(&stats->obj_deleted[i]);
+
+ if (created || deleted)
+ seq_printf(m, "%s%s: active %d total %d\n",
+ prefix,
binder_objstat_strings[i],
- stats->obj_created[i] - stats->obj_deleted[i],
- stats->obj_created[i]);
+ created - deleted,
+ created);
}
}

@@ -3514,50 +5441,59 @@ static void print_binder_proc_stats(stru
struct binder_proc *proc)
{
struct binder_work *w;
+ struct binder_thread *thread;
struct rb_node *n;
- int count, strong, weak;
+ int count, strong, weak, ready_threads;
+ size_t free_async_space =
+ binder_alloc_get_free_async_space(&proc->alloc);

seq_printf(m, "proc %d\n", proc->pid);
+ seq_printf(m, "context %s\n", proc->context->name);
count = 0;
+ ready_threads = 0;
+ binder_inner_proc_lock(proc);
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
count++;
+
+ list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
+ ready_threads++;
+
seq_printf(m, " threads: %d\n", count);
seq_printf(m, " requested threads: %d+%d/%d\n"
" ready threads %d\n"
" free async space %zd\n", proc->requested_threads,
proc->requested_threads_started, proc->max_threads,
- proc->ready_threads, proc->free_async_space);
+ ready_threads,
+ free_async_space);
count = 0;
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
count++;
+ binder_inner_proc_unlock(proc);
seq_printf(m, " nodes: %d\n", count);
count = 0;
strong = 0;
weak = 0;
+ binder_proc_lock(proc);
for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
struct binder_ref *ref = rb_entry(n, struct binder_ref,
rb_node_desc);
count++;
- strong += ref->strong;
- weak += ref->weak;
+ strong += ref->data.strong;
+ weak += ref->data.weak;
}
+ binder_proc_unlock(proc);
seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);

- count = 0;
- for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
- count++;
+ count = binder_alloc_get_allocated_count(&proc->alloc);
seq_printf(m, " buffers: %d\n", count);

count = 0;
+ binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->todo, entry) {
- switch (w->type) {
- case BINDER_WORK_TRANSACTION:
+ if (w->type == BINDER_WORK_TRANSACTION)
count++;
- break;
- default:
- break;
- }
}
+ binder_inner_proc_unlock(proc);
seq_printf(m, " pending transactions: %d\n", count);

print_binder_stats(m, " ", &proc->stats);
@@ -3568,107 +5504,131 @@ static int binder_state_show(struct seq_
{
struct binder_proc *proc;
struct binder_node *node;
- int do_lock = !binder_debug_no_lock;
-
- if (do_lock)
- binder_lock(__func__);
+ struct binder_node *last_node = NULL;

seq_puts(m, "binder state:\n");

+ spin_lock(&binder_dead_nodes_lock);
if (!hlist_empty(&binder_dead_nodes))
seq_puts(m, "dead nodes:\n");
- hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
- print_binder_node(m, node);
+ hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
+ /*
+ * take a temporary reference on the node so it
+ * survives and isn't removed from the list
+ * while we print it.
+ */
+ node->tmp_refs++;
+ spin_unlock(&binder_dead_nodes_lock);
+ if (last_node)
+ binder_put_node(last_node);
+ binder_node_lock(node);
+ print_binder_node_nilocked(m, node);
+ binder_node_unlock(node);
+ last_node = node;
+ spin_lock(&binder_dead_nodes_lock);
+ }
+ spin_unlock(&binder_dead_nodes_lock);
+ if (last_node)
+ binder_put_node(last_node);

+ mutex_lock(&binder_procs_lock);
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc(m, proc, 1);
- if (do_lock)
- binder_unlock(__func__);
+ mutex_unlock(&binder_procs_lock);
+
return 0;
}

static int binder_stats_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
- int do_lock = !binder_debug_no_lock;
-
- if (do_lock)
- binder_lock(__func__);

seq_puts(m, "binder stats:\n");

print_binder_stats(m, "", &binder_stats);

+ mutex_lock(&binder_procs_lock);
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc_stats(m, proc);
- if (do_lock)
- binder_unlock(__func__);
+ mutex_unlock(&binder_procs_lock);
+
return 0;
}

static int binder_transactions_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
- int do_lock = !binder_debug_no_lock;
-
- if (do_lock)
- binder_lock(__func__);

seq_puts(m, "binder transactions:\n");
+ mutex_lock(&binder_procs_lock);
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc(m, proc, 0);
- if (do_lock)
- binder_unlock(__func__);
+ mutex_unlock(&binder_procs_lock);
+
return 0;
}

static int binder_proc_show(struct seq_file *m, void *unused)
{
struct binder_proc *itr;
- struct binder_proc *proc = m->private;
- int do_lock = !binder_debug_no_lock;
- bool valid_proc = false;
-
- if (do_lock)
- binder_lock(__func__);
+ int pid = (unsigned long)m->private;

+ mutex_lock(&binder_procs_lock);
hlist_for_each_entry(itr, &binder_procs, proc_node) {
- if (itr == proc) {
- valid_proc = true;
- break;
+ if (itr->pid == pid) {
+ seq_puts(m, "binder proc state:\n");
+ print_binder_proc(m, itr, 1);
}
}
- if (valid_proc) {
- seq_puts(m, "binder proc state:\n");
- print_binder_proc(m, proc, 1);
- }
- if (do_lock)
- binder_unlock(__func__);
+ mutex_unlock(&binder_procs_lock);
+
return 0;
}

static void print_binder_transaction_log_entry(struct seq_file *m,
struct binder_transaction_log_entry *e)
{
+ int debug_id = READ_ONCE(e->debug_id_done);
+ /*
+ * read barrier to guarantee debug_id_done read before
+ * we print the log values
+ */
+ smp_rmb();
seq_printf(m,
- "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
+ "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
e->debug_id, (e->call_type == 2) ? "reply" :
((e->call_type == 1) ? "async" : "call "), e->from_proc,
- e->from_thread, e->to_proc, e->to_thread, e->to_node,
- e->target_handle, e->data_size, e->offsets_size);
+ e->from_thread, e->to_proc, e->to_thread, e->context_name,
+ e->to_node, e->target_handle, e->data_size, e->offsets_size,
+ e->return_error, e->return_error_param,
+ e->return_error_line);
+ /*
+ * read-barrier to guarantee read of debug_id_done after
+ * done printing the fields of the entry
+ */
+ smp_rmb();
+ seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
+ "\n" : " (incomplete)\n");
}

static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
struct binder_transaction_log *log = m->private;
+ unsigned int log_cur = atomic_read(&log->cur);
+ unsigned int count;
+ unsigned int cur;
int i;

- if (log->full) {
- for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
- print_binder_transaction_log_entry(m, &log->entry[i]);
+ count = log_cur + 1;
+ cur = count < ARRAY_SIZE(log->entry) && !log->full ?
+ 0 : count % ARRAY_SIZE(log->entry);
+ if (count > ARRAY_SIZE(log->entry) || log->full)
+ count = ARRAY_SIZE(log->entry);
+ for (i = 0; i < count; i++) {
+ unsigned int index = cur++ % ARRAY_SIZE(log->entry);
+
+ print_binder_transaction_log_entry(m, &log->entry[index]);
}
- for (i = 0; i < log->next; i++)
- print_binder_transaction_log_entry(m, &log->entry[i]);
return 0;
}

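The transaction log is now a lock-free ring: writers atomically bump log->cur (initialized to ~0U so the first increment lands on slot 0), and the reader derives both the oldest slot and the number of valid entries from that one counter. The index arithmetic, isolated as a hedged user-space sketch:

#include <stdbool.h>

#define LOG_SIZE 32	/* illustrative; stands in for ARRAY_SIZE(log->entry) */

/* cur_plus_one is atomic_read(&log->cur) + 1 at the time of the read */
void ring_window(unsigned int cur_plus_one, bool full,
		 unsigned int *first, unsigned int *count)
{
	*first = (cur_plus_one < LOG_SIZE && !full) ?
		 0 : cur_plus_one % LOG_SIZE;	/* oldest surviving slot */
	*count = (cur_plus_one > LOG_SIZE || full) ?
		 LOG_SIZE : cur_plus_one;	/* how many slots are valid */
}
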
@@ -3683,26 +5643,54 @@ static const struct file_operations bind
.release = binder_release,
};

-static struct miscdevice binder_miscdev = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "binder",
- .fops = &binder_fops
-};
-
BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);

+static int __init init_binder_device(const char *name)
+{
+ int ret;
+ struct binder_device *binder_device;
+
+ binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
+ if (!binder_device)
+ return -ENOMEM;
+
+ binder_device->miscdev.fops = &binder_fops;
+ binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
+ binder_device->miscdev.name = name;
+
+ binder_device->context.binder_context_mgr_uid = INVALID_UID;
+ binder_device->context.name = name;
+ mutex_init(&binder_device->context.context_mgr_node_lock);
+
+ ret = misc_register(&binder_device->miscdev);
+ if (ret < 0) {
+ kfree(binder_device);
+ return ret;
+ }
+
+ hlist_add_head(&binder_device->hlist, &binder_devices);
+
+ return ret;
+}
+
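binder_init() below registers one misc device per comma-separated name in binder_devices_param; on Android the list is typically "binder,hwbinder,vndbinder". strsep() consumes its argument in place, which is why the string is copied first. The same tokenizing pattern in a miniature user-space sketch:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

void register_each_device(const char *param)
{
	char *names = strdup(param);	/* strsep() modifies its argument */
	char *cursor = names, *name;

	if (!names)
		return;
	while ((name = strsep(&cursor, ",")))
		printf("would register misc device '%s'\n", name);
	free(names);
}
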
static int __init binder_init(void)
{
int ret;
+ char *device_name, *device_names;
+ struct binder_device *device;
+ struct hlist_node *tmp;
+
+ atomic_set(&binder_transaction_log.cur, ~0U);
+ atomic_set(&binder_transaction_log_failed.cur, ~0U);

binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
if (binder_debugfs_dir_entry_root)
binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
binder_debugfs_dir_entry_root);
- ret = misc_register(&binder_miscdev);
+
if (binder_debugfs_dir_entry_root) {
debugfs_create_file("state",
S_IRUGO,
@@ -3730,6 +5718,35 @@ static int __init binder_init(void)
&binder_transaction_log_failed,
&binder_transaction_log_fops);
}
+
+ /*
+ * Copy the module_parameter string, because we don't want to
+ * tokenize it in-place.
+ */
+ device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
+ if (!device_names) {
+ ret = -ENOMEM;
+ goto err_alloc_device_names_failed;
+ }
+ strcpy(device_names, binder_devices_param);
+
+ while ((device_name = strsep(&device_names, ","))) {
+ ret = init_binder_device(device_name);
+ if (ret)
+ goto err_init_binder_device_failed;
+ }
+
+ return ret;
+
+err_init_binder_device_failed:
+ hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
+ misc_deregister(&device->miscdev);
+ hlist_del(&device->hlist);
+ kfree(device);
+ }
+err_alloc_device_names_failed:
+ debugfs_remove_recursive(binder_debugfs_dir_entry_root);
+
return ret;
}