Merge tag 'char-misc-4.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc driver updates from Greg KH:
 "Here is the big pull request for char/misc drivers for 4.16-rc1.

  There's a lot of stuff in here. Three new driver subsystems were added
  for various types of hardware busses:

   - siox
   - slimbus
   - soundwire

  as well as a new vboxguest subsystem for the VirtualBox hypervisor
  drivers.

  There's also big updates from the FPGA subsystem, lots of Android
  binder fixes, the usual handful of hyper-v updates, and lots of other
  smaller driver updates.

  All of these have been in linux-next for a long time, with no reported
  issues"

* tag 'char-misc-4.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (155 commits)
  char: lp: use true or false for boolean values
  android: binder: use VM_ALLOC to get vm area
  android: binder: Use true and false for boolean values
  lkdtm: fix handle_irq_event symbol for INT_HW_IRQ_EN
  EISA: Delete error message for a failed memory allocation in eisa_probe()
  EISA: Whitespace cleanup
  misc: remove AVR32 dependencies
  virt: vbox: Add error mapping for VERR_INVALID_NAME and VERR_NO_MORE_FILES
  soundwire: Fix a signedness bug
  uio_hv_generic: fix new type mismatch warnings
  uio_hv_generic: fix type mismatch warnings
  auxdisplay: img-ascii-lcd: add missing MODULE_DESCRIPTION/AUTHOR/LICENSE
  uio_hv_generic: add rescind support
  uio_hv_generic: check that host supports monitor page
  uio_hv_generic: create send and receive buffers
  uio: document uio_hv_generic regions
  doc: fix documentation about uio_hv_generic
  vmbus: add monitor_id and subchannel_id to sysfs per channel
  vmbus: fix ABI documentation
  uio_hv_generic: use ISR callback method
  ...
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -141,7 +141,7 @@ enum {
 };
 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
 	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
-module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
+module_param_named(debug_mask, binder_debug_mask, uint, 0644);
 
 static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
 module_param_named(devices, binder_devices_param, charp, 0444);
@@ -160,7 +160,7 @@ static int binder_set_stop_on_user_error(const char *val,
 	return ret;
 }
 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
-		  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
+		  param_get_int, &binder_stop_on_user_error, 0644);
 
 #define binder_debug(mask, x...) \
 	do { \
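A note on the two module_param changes above: the symbolic and octal modes are the same value; the series just moves to plain octal, which checkpatch favors. A minimal userspace check (S_IRUGO is kernel-only, so it is expanded into S_IRUSR | S_IRGRP | S_IROTH here) confirms the equivalence:

#include <assert.h>
#include <sys/stat.h>

int main(void)
{
	/* S_IRUGO expands to S_IRUSR | S_IRGRP | S_IROTH (0444) in the kernel */
	assert((S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH) == 0644);
	assert((S_IRUSR | S_IRGRP | S_IROTH) == 0444);
	return 0;
}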
@@ -249,7 +249,7 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
 	unsigned int cur = atomic_inc_return(&log->cur);
 
 	if (cur >= ARRAY_SIZE(log->entry))
-		log->full = 1;
+		log->full = true;
 	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
 	WRITE_ONCE(e->debug_id_done, 0);
 	/*
@@ -493,8 +493,6 @@ enum binder_deferred_state {
  *                        (protected by @inner_lock)
  * @todo:                 list of work for this process
  *                        (protected by @inner_lock)
- * @wait:                 wait queue head to wait for proc work
- *                        (invariant after initialized)
  * @stats:                per-process binder statistics
  *                        (atomics, no lock needed)
  * @delivered_death:      list of delivered death notification
@@ -537,7 +535,6 @@ struct binder_proc {
 	bool is_dead;
 
 	struct list_head todo;
-	wait_queue_head_t wait;
 	struct binder_stats stats;
 	struct list_head delivered_death;
 	int max_threads;
@@ -579,6 +576,8 @@ enum {
  *                        (protected by @proc->inner_lock)
  * @todo:                 list of work to do for this thread
  *                        (protected by @proc->inner_lock)
+ * @process_todo:         whether work in @todo should be processed
+ *                        (protected by @proc->inner_lock)
  * @return_error:         transaction errors reported by this thread
  *                        (only accessed by this thread)
  * @reply_error:          transaction errors reported by target thread
@@ -604,6 +603,7 @@ struct binder_thread {
 	bool looper_need_return; /* can be written by other thread */
 	struct binder_transaction *transaction_stack;
 	struct list_head todo;
+	bool process_todo;
 	struct binder_error return_error;
 	struct binder_error reply_error;
 	wait_queue_head_t wait;
@@ -789,6 +789,16 @@ static bool binder_worklist_empty(struct binder_proc *proc,
 	return ret;
 }
 
+/**
+ * binder_enqueue_work_ilocked() - Add an item to the work list
+ * @work:         struct binder_work to add to list
+ * @target_list:  list to add work to
+ *
+ * Adds the work to the specified list. Asserts that work
+ * is not already on a list.
+ *
+ * Requires the proc->inner_lock to be held.
+ */
 static void
 binder_enqueue_work_ilocked(struct binder_work *work,
 			    struct list_head *target_list)
@@ -799,22 +809,56 @@ binder_enqueue_work_ilocked(struct binder_work *work,
 }
 
 /**
- * binder_enqueue_work() - Add an item to the work list
- * @proc:         binder_proc associated with list
+ * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
+ * @thread:       thread to queue work to
  * @work:         struct binder_work to add to list
- * @target_list:  list to add work to
  *
- * Adds the work to the specified list. Asserts that work
- * is not already on a list.
+ * Adds the work to the todo list of the thread. Doesn't set the process_todo
+ * flag, which means that (if it wasn't already set) the thread will go to
+ * sleep without handling this work when it calls read.
+ *
+ * Requires the proc->inner_lock to be held.
  */
 static void
-binder_enqueue_work(struct binder_proc *proc,
-		    struct binder_work *work,
-		    struct list_head *target_list)
+binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
+					    struct binder_work *work)
 {
-	binder_inner_proc_lock(proc);
-	binder_enqueue_work_ilocked(work, target_list);
-	binder_inner_proc_unlock(proc);
+	binder_enqueue_work_ilocked(work, &thread->todo);
 }
 
+/**
+ * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
+ * @thread:       thread to queue work to
+ * @work:         struct binder_work to add to list
+ *
+ * Adds the work to the todo list of the thread, and enables processing
+ * of the todo queue.
+ *
+ * Requires the proc->inner_lock to be held.
+ */
+static void
+binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
+				   struct binder_work *work)
+{
+	binder_enqueue_work_ilocked(work, &thread->todo);
+	thread->process_todo = true;
+}
+
+/**
+ * binder_enqueue_thread_work() - Add an item to the thread work list
+ * @thread:       thread to queue work to
+ * @work:         struct binder_work to add to list
+ *
+ * Adds the work to the todo list of the thread, and enables processing
+ * of the todo queue.
+ */
+static void
+binder_enqueue_thread_work(struct binder_thread *thread,
+			   struct binder_work *work)
+{
+	binder_inner_proc_lock(thread->proc);
+	binder_enqueue_thread_work_ilocked(thread, work);
+	binder_inner_proc_unlock(thread->proc);
+}
+
 static void
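The three helpers above separate "queue the work" from "make the thread runnable": only the binder_enqueue_thread_work{,_ilocked}() variants set process_todo, while the deferred variant parks work on thread->todo without waking the thread. A toy userspace model of that gating (illustrative only; the names and types here are invented, not kernel code):

#include <stdbool.h>
#include <stdio.h>

struct toy_thread {
	int todo_len;		/* stands in for thread->todo */
	bool process_todo;
};

/* like binder_enqueue_deferred_thread_work_ilocked(): queue, no wakeup */
static void enqueue_deferred(struct toy_thread *t)
{
	t->todo_len++;
}

/* like binder_enqueue_thread_work_ilocked(): queue and enable processing */
static void enqueue(struct toy_thread *t)
{
	t->todo_len++;
	t->process_todo = true;
}

/* like the binder_has_work_ilocked() change below: keyed off process_todo */
static bool has_work(const struct toy_thread *t)
{
	return t->process_todo;
}

int main(void)
{
	struct toy_thread t = { 0, false };

	enqueue_deferred(&t);	/* e.g. a deferred TRANSACTION_COMPLETE */
	printf("after deferred enqueue: %d\n", has_work(&t));	/* 0: sleeps */
	enqueue(&t);		/* e.g. the eventual reply */
	printf("after normal enqueue:   %d\n", has_work(&t));	/* 1: wakes */
	return 0;
}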
@@ -940,7 +984,7 @@ err:
 static bool binder_has_work_ilocked(struct binder_thread *thread,
 				    bool do_proc_work)
 {
-	return !binder_worklist_empty_ilocked(&thread->todo) ||
+	return thread->process_todo ||
 		thread->looper_need_return ||
 		(do_proc_work &&
 		 !binder_worklist_empty_ilocked(&thread->proc->todo));
@@ -1228,6 +1272,17 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
 			node->local_strong_refs++;
 		if (!node->has_strong_ref && target_list) {
 			binder_dequeue_work_ilocked(&node->work);
+			/*
+			 * Note: this function is the only place where we queue
+			 * directly to a thread->todo without using the
+			 * corresponding binder_enqueue_thread_work() helper
+			 * functions; in this case it's ok to not set the
+			 * process_todo flag, since we know this node work will
+			 * always be followed by other work that starts queue
+			 * processing: in case of synchronous transactions, a
+			 * BR_REPLY or BR_ERROR; in case of oneway
+			 * transactions, a BR_TRANSACTION_COMPLETE.
+			 */
 			binder_enqueue_work_ilocked(&node->work, target_list);
 		}
 	} else {
@@ -1239,6 +1294,9 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
 				  node->debug_id);
 			return -EINVAL;
 		}
+		/*
+		 * See comment above
+		 */
 		binder_enqueue_work_ilocked(&node->work, target_list);
 	}
 }
@@ -1928,9 +1986,9 @@ static void binder_send_failed_reply(struct binder_transaction *t,
 			binder_pop_transaction_ilocked(target_thread, t);
 			if (target_thread->reply_error.cmd == BR_OK) {
 				target_thread->reply_error.cmd = error_code;
-				binder_enqueue_work_ilocked(
-					&target_thread->reply_error.work,
-					&target_thread->todo);
+				binder_enqueue_thread_work_ilocked(
+					target_thread,
+					&target_thread->reply_error.work);
 				wake_up_interruptible(&target_thread->wait);
 			} else {
 				WARN(1, "Unexpected reply error: %u\n",
@@ -2569,20 +2627,18 @@ static bool binder_proc_transaction(struct binder_transaction *t,
 				    struct binder_proc *proc,
 				    struct binder_thread *thread)
 {
-	struct list_head *target_list = NULL;
 	struct binder_node *node = t->buffer->target_node;
 	bool oneway = !!(t->flags & TF_ONE_WAY);
-	bool wakeup = true;
+	bool pending_async = false;
 
 	BUG_ON(!node);
 	binder_node_lock(node);
 	if (oneway) {
 		BUG_ON(thread);
 		if (node->has_async_transaction) {
-			target_list = &node->async_todo;
-			wakeup = false;
+			pending_async = true;
 		} else {
-			node->has_async_transaction = 1;
+			node->has_async_transaction = true;
 		}
 	}
 
@@ -2594,19 +2650,17 @@ static bool binder_proc_transaction(struct binder_transaction *t,
 		return false;
 	}
 
-	if (!thread && !target_list)
+	if (!thread && !pending_async)
 		thread = binder_select_thread_ilocked(proc);
 
 	if (thread)
-		target_list = &thread->todo;
-	else if (!target_list)
-		target_list = &proc->todo;
+		binder_enqueue_thread_work_ilocked(thread, &t->work);
+	else if (!pending_async)
+		binder_enqueue_work_ilocked(&t->work, &proc->todo);
 	else
-		BUG_ON(target_list != &node->async_todo);
-
-	binder_enqueue_work_ilocked(&t->work, target_list);
+		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
 
-	if (wakeup)
+	if (!pending_async)
 		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
 
 	binder_inner_proc_unlock(proc);
@@ -3101,10 +3155,10 @@ static void binder_transaction(struct binder_proc *proc,
 		}
 	}
 	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
-	binder_enqueue_work(proc, tcomplete, &thread->todo);
 	t->work.type = BINDER_WORK_TRANSACTION;
 
 	if (reply) {
+		binder_enqueue_thread_work(thread, tcomplete);
 		binder_inner_proc_lock(target_proc);
 		if (target_thread->is_dead) {
 			binder_inner_proc_unlock(target_proc);
@@ -3112,13 +3166,21 @@ static void binder_transaction(struct binder_proc *proc,
 		}
 		BUG_ON(t->buffer->async_transaction != 0);
 		binder_pop_transaction_ilocked(target_thread, in_reply_to);
-		binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
+		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
 		binder_inner_proc_unlock(target_proc);
 		wake_up_interruptible_sync(&target_thread->wait);
 		binder_free_transaction(in_reply_to);
 	} else if (!(t->flags & TF_ONE_WAY)) {
 		BUG_ON(t->buffer->async_transaction != 0);
 		binder_inner_proc_lock(proc);
+		/*
+		 * Defer the TRANSACTION_COMPLETE, so we don't return to
+		 * userspace immediately; this allows the target process to
+		 * immediately start processing this transaction, reducing
+		 * latency. We will then return the TRANSACTION_COMPLETE when
+		 * the target replies (or there is an error).
+		 */
+		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
 		t->need_reply = 1;
 		t->from_parent = thread->transaction_stack;
 		thread->transaction_stack = t;
@@ -3132,6 +3194,7 @@ static void binder_transaction(struct binder_proc *proc,
 	} else {
 		BUG_ON(target_node == NULL);
 		BUG_ON(t->buffer->async_transaction != 1);
+		binder_enqueue_thread_work(thread, tcomplete);
 		if (!binder_proc_transaction(t, target_proc, NULL))
 			goto err_dead_proc_or_thread;
 	}
@@ -3210,15 +3273,11 @@ err_invalid_target_handle:
 	BUG_ON(thread->return_error.cmd != BR_OK);
 	if (in_reply_to) {
 		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
-		binder_enqueue_work(thread->proc,
-				    &thread->return_error.work,
-				    &thread->todo);
+		binder_enqueue_thread_work(thread, &thread->return_error.work);
 		binder_send_failed_reply(in_reply_to, return_error);
 	} else {
 		thread->return_error.cmd = return_error;
-		binder_enqueue_work(thread->proc,
-				    &thread->return_error.work,
-				    &thread->todo);
+		binder_enqueue_thread_work(thread, &thread->return_error.work);
 	}
 }
 
@@ -3424,7 +3483,7 @@ static int binder_thread_write(struct binder_proc *proc,
 				w = binder_dequeue_work_head_ilocked(
 						&buf_node->async_todo);
 				if (!w) {
-					buf_node->has_async_transaction = 0;
+					buf_node->has_async_transaction = false;
 				} else {
 					binder_enqueue_work_ilocked(
 							w, &proc->todo);
@@ -3522,10 +3581,9 @@ static int binder_thread_write(struct binder_proc *proc,
 				WARN_ON(thread->return_error.cmd !=
 					BR_OK);
 				thread->return_error.cmd = BR_ERROR;
-				binder_enqueue_work(
-					thread->proc,
-					&thread->return_error.work,
-					&thread->todo);
+				binder_enqueue_thread_work(
+					thread,
+					&thread->return_error.work);
 				binder_debug(
 					BINDER_DEBUG_FAILED_TRANSACTION,
 					"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
@@ -3605,9 +3663,9 @@ static int binder_thread_write(struct binder_proc *proc,
 			if (thread->looper &
 			    (BINDER_LOOPER_STATE_REGISTERED |
 			     BINDER_LOOPER_STATE_ENTERED))
-				binder_enqueue_work_ilocked(
-					&death->work,
-					&thread->todo);
+				binder_enqueue_thread_work_ilocked(
+					thread,
+					&death->work);
 			else {
 				binder_enqueue_work_ilocked(
 					&death->work,
@@ -3662,8 +3720,8 @@ static int binder_thread_write(struct binder_proc *proc,
 				if (thread->looper &
 				    (BINDER_LOOPER_STATE_REGISTERED |
 				     BINDER_LOOPER_STATE_ENTERED))
-					binder_enqueue_work_ilocked(
-						&death->work, &thread->todo);
+					binder_enqueue_thread_work_ilocked(
+						thread, &death->work);
 				else {
 					binder_enqueue_work_ilocked(
 						&death->work,
@@ -3837,6 +3895,8 @@ retry:
 			break;
 		}
 		w = binder_dequeue_work_head_ilocked(list);
+		if (binder_worklist_empty_ilocked(&thread->todo))
+			thread->process_todo = false;
 
 		switch (w->type) {
 		case BINDER_WORK_TRANSACTION: {
@@ -4302,6 +4362,18 @@ static int binder_thread_release(struct binder_proc *proc,
 		if (t)
 			spin_lock(&t->lock);
 	}
+
+	/*
+	 * If this thread used poll, make sure we remove the waitqueue
+	 * from any epoll data structures holding it with POLLFREE.
+	 * waitqueue_active() is safe to use here because we're holding
+	 * the inner lock.
+	 */
+	if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
+	    waitqueue_active(&thread->wait)) {
+		wake_up_poll(&thread->wait, POLLHUP | POLLFREE);
+	}
+
 	binder_inner_proc_unlock(thread->proc);
 
 	if (send_reply)
@@ -4646,7 +4718,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 	return 0;
 
 err_bad_arg:
-	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
+	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
 	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
 	return ret;
 }
@@ -4656,7 +4728,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
 	struct binder_proc *proc;
 	struct binder_device *binder_dev;
 
-	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
+	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
 		     current->group_leader->pid, current->pid);
 
 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
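Both logging changes above replace a hard-coded function name in the format string with __func__, which the compiler supplies and which stays correct if the function is ever renamed. A standalone illustration:

#include <stdio.h>

static void open_demo(void)
{
	/* __func__ expands to the enclosing function's name */
	printf("%s: %d:%d\n", __func__, 1234, 1234);
}

int main(void)
{
	open_demo();	/* prints "open_demo: 1234:1234" */
	return 0;
}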
@@ -4695,7 +4767,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
 		 * anyway print all contexts that a given PID has, so this
 		 * is not a problem.
 		 */
-		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
+		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
 			binder_debugfs_dir_entry_proc,
 			(void *)(unsigned long)proc->pid,
 			&binder_proc_fops);
@@ -5524,7 +5596,9 @@ static int __init binder_init(void)
 	struct binder_device *device;
 	struct hlist_node *tmp;
 
-	binder_alloc_shrinker_init();
+	ret = binder_alloc_shrinker_init();
+	if (ret)
+		return ret;
 
 	atomic_set(&binder_transaction_log.cur, ~0U);
 	atomic_set(&binder_transaction_log_failed.cur, ~0U);
@@ -5536,27 +5610,27 @@ static int __init binder_init(void)
 
 	if (binder_debugfs_dir_entry_root) {
 		debugfs_create_file("state",
-				    S_IRUGO,
+				    0444,
 				    binder_debugfs_dir_entry_root,
 				    NULL,
 				    &binder_state_fops);
 		debugfs_create_file("stats",
-				    S_IRUGO,
+				    0444,
 				    binder_debugfs_dir_entry_root,
 				    NULL,
 				    &binder_stats_fops);
 		debugfs_create_file("transactions",
-				    S_IRUGO,
+				    0444,
 				    binder_debugfs_dir_entry_root,
 				    NULL,
 				    &binder_transactions_fops);
 		debugfs_create_file("transaction_log",
-				    S_IRUGO,
+				    0444,
 				    binder_debugfs_dir_entry_root,
 				    &binder_transaction_log,
 				    &binder_transaction_log_fops);
 		debugfs_create_file("failed_transaction_log",
-				    S_IRUGO,
+				    0444,
 				    binder_debugfs_dir_entry_root,
 				    &binder_transaction_log_failed,
 				    &binder_transaction_log_fops);
 
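The five files above all live in the "binder" debugfs directory, so once debugfs is mounted they read like ordinary files. A small reader sketch (the /sys/kernel/debug mount point and sufficient privileges are assumptions about the running system):

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/binder/stats", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}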
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -281,6 +281,9 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 			goto err_vm_insert_page_failed;
 		}
 
+		if (index + 1 > alloc->pages_high)
+			alloc->pages_high = index + 1;
+
 		trace_binder_alloc_page_end(alloc, index);
 		/* vm_insert_page does not seem to increment the refcount */
 	}
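The new pages_high counter is a plain ratcheting high-watermark: it only ever grows, recording the furthest page index the allocator has touched. The pattern in isolation:

#include <stdio.h>

static size_t pages_high;	/* like alloc->pages_high above */

static void note_page_in_use(size_t index)
{
	if (index + 1 > pages_high)
		pages_high = index + 1;
}

int main(void)
{
	note_page_in_use(3);
	note_page_in_use(1);	/* lower index: watermark unchanged */
	printf("pages high watermark: %zu\n", pages_high);	/* 4 */
	return 0;
}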
@@ -324,11 +327,12 @@ err_no_vma:
 	return vma ? -ENOMEM : -ESRCH;
 }
 
-struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
-						  size_t data_size,
-						  size_t offsets_size,
-						  size_t extra_buffers_size,
-						  int is_async)
+static struct binder_buffer *binder_alloc_new_buf_locked(
+				struct binder_alloc *alloc,
+				size_t data_size,
+				size_t offsets_size,
+				size_t extra_buffers_size,
+				int is_async)
 {
 	struct rb_node *n = alloc->free_buffers.rb_node;
 	struct binder_buffer *buffer;
@@ -666,7 +670,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 		goto err_already_mapped;
 	}
 
-	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
+	area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC);
 	if (area == NULL) {
 		ret = -ENOMEM;
 		failure_string = "get_vm_area";
@@ -853,6 +857,7 @@ void binder_alloc_print_pages(struct seq_file *m,
 	}
 	mutex_unlock(&alloc->mutex);
 	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
+	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
 }
 
 /**
@@ -1002,8 +1007,14 @@ void binder_alloc_init(struct binder_alloc *alloc)
 	INIT_LIST_HEAD(&alloc->buffers);
 }
 
-void binder_alloc_shrinker_init(void)
+int binder_alloc_shrinker_init(void)
 {
-	list_lru_init(&binder_alloc_lru);
-	register_shrinker(&binder_shrinker);
+	int ret = list_lru_init(&binder_alloc_lru);
+
+	if (ret == 0) {
+		ret = register_shrinker(&binder_shrinker);
+		if (ret)
+			list_lru_destroy(&binder_alloc_lru);
+	}
+	return ret;
 }
 
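The rewritten binder_alloc_shrinker_init() is the standard two-step init-with-rollback: if the second step fails, undo the first and propagate the error so binder_init() can bail out instead of running with a half-registered shrinker. The same shape as a self-contained sketch (first_init/second_init/first_teardown are made-up stand-ins for list_lru_init/register_shrinker/list_lru_destroy):

#include <stdio.h>

static int first_init(void)      { return 0; }
static int second_init(void)     { return -1; /* simulate failure */ }
static void first_teardown(void) { puts("rolled back step one"); }

static int two_step_init(void)
{
	int ret = first_init();

	if (ret == 0) {
		ret = second_init();
		if (ret)
			first_teardown();
	}
	return ret;
}

int main(void)
{
	return two_step_init() ? 1 : 0;
}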
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -92,6 +92,7 @@ struct binder_lru_page {
  * @pages:              array of binder_lru_page
  * @buffer_size:        size of address space specified via mmap
  * @pid:                pid for associated binder_proc (invariant after init)
+ * @pages_high:         high watermark of offset in @pages
  *
  * Bookkeeping structure for per-proc address space management for binder
  * buffers. It is normally initialized during binder_init() and binder_mmap()
@@ -112,6 +113,7 @@ struct binder_alloc {
 	size_t buffer_size;
 	uint32_t buffer_free;
 	int pid;
+	size_t pages_high;
 };
 
 #ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
@@ -128,7 +130,7 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 						  size_t extra_buffers_size,
 						  int is_async);
 extern void binder_alloc_init(struct binder_alloc *alloc);
-void binder_alloc_shrinker_init(void);
+extern int binder_alloc_shrinker_init(void);
 extern void binder_alloc_vma_close(struct binder_alloc *alloc);
 extern struct binder_buffer *
 binder_alloc_prepare_to_free(struct binder_alloc *alloc,