binder: protect proc->nodes with inner lock

When locks for binder_ref handling are added, proc->nodes
will need to be modified while holding the outer lock.
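
The new code follows the driver's locking convention: functions that
expect proc->inner_lock to already be held get an "_ilocked" suffix
and assert the lock, while unlocked callers go through a thin wrapper
that takes and drops it. Condensed from the hunks below into one place
(a sketch of the resulting shape, not a standalone build):

	/* caller must hold proc->inner_lock */
	static struct binder_node *binder_get_node_ilocked(
			struct binder_proc *proc, binder_uintptr_t ptr)
	{
		struct rb_node *n = proc->nodes.rb_node;
		struct binder_node *node;

		BUG_ON(!spin_is_locked(&proc->inner_lock));

		while (n) {
			node = rb_entry(n, struct binder_node, rb_node);
			if (ptr < node->ptr)
				n = n->rb_left;
			else if (ptr > node->ptr)
				n = n->rb_right;
			else {
				/* pin the node for the caller */
				binder_inc_node_tmpref_ilocked(node);
				return node;
			}
		}
		return NULL;
	}

	/* unlocked entry point */
	static struct binder_node *binder_get_node(struct binder_proc *proc,
						   binder_uintptr_t ptr)
	{
		struct binder_node *node;

		binder_inner_proc_lock(proc);
		node = binder_get_node_ilocked(proc, ptr);
		binder_inner_proc_unlock(proc);
		return node;
	}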

Signed-off-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
author    Todd Kjos <tkjos@google.com>    2017-06-29 12:02:04 -07:00
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent    673068eee8
commit    da0fa9e4e8

@@ -313,6 +313,7 @@ struct binder_error {
* @work: worklist element for node work
* (protected by @proc->inner_lock)
* @rb_node: element for proc->nodes tree
+* (protected by @proc->inner_lock)
* @dead_node: element for binder_dead_nodes list
* (protected by binder_dead_nodes_lock)
* @proc: binder_proc that owns this node
@@ -470,6 +471,7 @@ enum binder_deferred_state {
* @threads: rbtree of binder_threads in this proc
* @nodes: rbtree of binder nodes associated with
* this proc ordered by node->ptr
+* (protected by @inner_lock)
* @refs_by_desc: rbtree of refs ordered by ref->desc
* @refs_by_node: rbtree of refs ordered by ref->node
* @pid PID of group_leader of process
@@ -856,7 +858,7 @@ static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
-static void binder_inc_node_tmpref(struct binder_node *node);
+static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
@@ -938,12 +940,14 @@ static void binder_set_nice(long nice)
binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
-static struct binder_node *binder_get_node(struct binder_proc *proc,
-binder_uintptr_t ptr)
+static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
+binder_uintptr_t ptr)
{
struct rb_node *n = proc->nodes.rb_node;
struct binder_node *node;
+BUG_ON(!spin_is_locked(&proc->inner_lock));
while (n) {
node = rb_entry(n, struct binder_node, rb_node);
@@ -957,15 +961,28 @@ static struct binder_node *binder_get_node(struct binder_proc *proc,
* to ensure node stays alive until
* call to binder_put_node()
*/
-binder_inc_node_tmpref(node);
+binder_inc_node_tmpref_ilocked(node);
return node;
}
}
return NULL;
}
-static struct binder_node *binder_new_node(struct binder_proc *proc,
-struct flat_binder_object *fp)
+static struct binder_node *binder_get_node(struct binder_proc *proc,
+binder_uintptr_t ptr)
+{
+struct binder_node *node;
+binder_inner_proc_lock(proc);
+node = binder_get_node_ilocked(proc, ptr);
+binder_inner_proc_unlock(proc);
+return node;
+}
+static struct binder_node *binder_init_node_ilocked(
+struct binder_proc *proc,
+struct binder_node *new_node,
+struct flat_binder_object *fp)
{
struct rb_node **p = &proc->nodes.rb_node;
struct rb_node *parent = NULL;
@@ -974,7 +991,9 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
binder_uintptr_t cookie = fp ? fp->cookie : 0;
__u32 flags = fp ? fp->flags : 0;
+BUG_ON(!spin_is_locked(&proc->inner_lock));
while (*p) {
parent = *p;
node = rb_entry(parent, struct binder_node, rb_node);
@@ -982,13 +1001,17 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
p = &(*p)->rb_left;
else if (ptr > node->ptr)
p = &(*p)->rb_right;
-else
-return NULL;
+else {
+/*
+* A matching node is already in
+* the rb tree. Abandon the init
+* and return it.
+*/
+binder_inc_node_tmpref_ilocked(node);
+return node;
+}
}
-node = kzalloc(sizeof(*node), GFP_KERNEL);
-if (node == NULL)
-return NULL;
+node = new_node;
binder_stats_created(BINDER_STAT_NODE);
node->tmp_refs++;
rb_link_node(&node->rb_node, parent, p);
@@ -1007,6 +1030,27 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
"%d:%d node %d u%016llx c%016llx created\n",
proc->pid, current->pid, node->debug_id,
(u64)node->ptr, (u64)node->cookie);
return node;
}
+static struct binder_node *binder_new_node(struct binder_proc *proc,
+struct flat_binder_object *fp)
+{
+struct binder_node *node;
+struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
+if (!new_node)
+return NULL;
+binder_inner_proc_lock(proc);
+node = binder_init_node_ilocked(proc, new_node, fp);
+binder_inner_proc_unlock(proc);
+if (node != new_node)
+/*
+* The node was already added by another thread
+*/
+kfree(new_node);
+return node;
+}
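
A note on the shape of binder_new_node() above: the kzalloc(GFP_KERNEL)
moves outside the locked region because a GFP_KERNEL allocation may
sleep, and sleeping is not allowed while holding a spinlock. The price
is a possibly wasted allocation when two threads race to create the
same node; binder_init_node_ilocked() resolves that by returning the
node that won the race so the loser can kfree() its spare. The generic
pattern, as a sketch (insert_or_find() is a hypothetical helper, not
driver code):

	new = kzalloc(sizeof(*new), GFP_KERNEL); /* may sleep: allocate unlocked */
	if (!new)
		return NULL;
	spin_lock(&lock);
	obj = insert_or_find(&tree, new);        /* hypothetical: insert, or return existing */
	spin_unlock(&lock);
	if (obj != new)                          /* lost the race */
		kfree(new);
	return obj;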
@@ -4420,6 +4464,7 @@ static void binder_deferred_release(struct binder_proc *proc)
nodes = 0;
incoming_refs = 0;
+binder_inner_proc_lock(proc);
while ((n = rb_first(&proc->nodes))) {
struct binder_node *node;
@@ -4430,10 +4475,13 @@ static void binder_deferred_release(struct binder_proc *proc)
* calling binder_node_release() which will either
* kfree() the node or call binder_put_node()
*/
-binder_inc_node_tmpref(node);
+binder_inc_node_tmpref_ilocked(node);
rb_erase(&node->rb_node, &proc->nodes);
+binder_inner_proc_unlock(proc);
incoming_refs = binder_node_release(node, incoming_refs);
+binder_inner_proc_lock(proc);
}
+binder_inner_proc_unlock(proc);
outgoing_refs = 0;
while ((n = rb_first(&proc->refs_by_desc))) {
@@ -4618,14 +4666,16 @@ static void print_binder_thread_ilocked(struct seq_file *m,
m->count = start_pos;
}
-static void print_binder_node_nlocked(struct seq_file *m,
-struct binder_node *node)
+static void print_binder_node_nilocked(struct seq_file *m,
+struct binder_node *node)
{
struct binder_ref *ref;
struct binder_work *w;
int count;
WARN_ON(!spin_is_locked(&node->lock));
+if (node->proc)
+WARN_ON(!spin_is_locked(&node->proc->inner_lock));
count = 0;
hlist_for_each_entry(ref, &node->refs, node_entry)
@@ -4643,11 +4693,9 @@ static void print_binder_node_nlocked(struct seq_file *m,
}
seq_puts(m, "\n");
if (node->proc) {
-binder_inner_proc_lock(node->proc);
list_for_each_entry(w, &node->async_todo, entry)
print_binder_work_ilocked(m, " ",
" pending async transaction", w);
-binder_inner_proc_unlock(node->proc);
}
}
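
The rename from _nlocked to _nilocked extends the suffix convention:
"n" for the node lock, "i" for the owning proc's inner lock, meaning
the caller must now hold both. That is also why the
binder_inner_proc_lock()/unlock() pair around the async_todo walk is
deleted here: the function no longer takes the inner lock itself, it
only asserts it via the WARN_ON() added above.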
@@ -4669,6 +4717,7 @@ static void print_binder_proc(struct seq_file *m,
struct rb_node *n;
size_t start_pos = m->count;
size_t header_pos;
+struct binder_node *last_node = NULL;
seq_printf(m, "proc %d\n", proc->pid);
seq_printf(m, "context %s\n", proc->context->name);
@@ -4678,15 +4727,30 @@ static void print_binder_proc(struct seq_file *m,
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
rb_node), print_all);
-binder_inner_proc_unlock(proc);
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
struct binder_node *node = rb_entry(n, struct binder_node,
rb_node);
-binder_node_lock(node);
-if (print_all || node->has_async_transaction)
-print_binder_node_nlocked(m, node);
-binder_node_unlock(node);
+/*
+* take a temporary reference on the node so it
+* survives and isn't removed from the tree
+* while we print it.
+*/
+binder_inc_node_tmpref_ilocked(node);
+/* Need to drop inner lock to take node lock */
+binder_inner_proc_unlock(proc);
+if (last_node)
+binder_put_node(last_node);
+binder_node_inner_lock(node);
+print_binder_node_nilocked(m, node);
+binder_node_inner_unlock(node);
+last_node = node;
+binder_inner_proc_lock(proc);
}
+binder_inner_proc_unlock(proc);
+if (last_node)
+binder_put_node(last_node);
if (print_all) {
for (n = rb_first(&proc->refs_by_desc);
n != NULL;
@@ -4822,8 +4886,10 @@ static void print_binder_proc_stats(struct seq_file *m,
proc->ready_threads,
binder_alloc_get_free_async_space(&proc->alloc));
count = 0;
+binder_inner_proc_lock(proc);
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
count++;
+binder_inner_proc_unlock(proc);
seq_printf(m, " nodes: %d\n", count);
count = 0;
strong = 0;
@@ -4877,7 +4943,7 @@ static int binder_state_show(struct seq_file *m, void *unused)
if (last_node)
binder_put_node(last_node);
binder_node_lock(node);
-print_binder_node_nlocked(m, node);
+print_binder_node_nilocked(m, node);
binder_node_unlock(node);
last_node = node;
spin_lock(&binder_dead_nodes_lock);
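
The hunks above all follow the same discipline for walking proc->nodes
when a per-node operation needs locks of its own: pin the node with a
temporary reference, drop the inner lock for the unsafe call, then
re-take it before advancing. Condensed from the
binder_deferred_release() hunks (a sketch, not a standalone build):

	binder_inner_proc_lock(proc);
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		/* tmpref keeps the node alive across the unlocked window */
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		/* binder_node_release() takes other locks; drop ours first */
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);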