binder: malloc new_buffer outside of locks
Preallocate new_buffer before acquiring the alloc->mutex and hand it
down to binder_alloc_new_buf_locked(). The new buffer will be used in
the vast majority of requests (measured at 98.2% in field data). The
buffer is discarded otherwise.

This change is required in preparation for transitioning alloc->mutex
into a spinlock in subsequent commits.

Signed-off-by: Carlos Llamas <cmllamas@google.com>
Reviewed-by: Alice Ryhl <aliceryhl@google.com>
Link: https://lore.kernel.org/r/20231201172212.1813387-18-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit c7ac30fad1
parent ea2735ce19
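As background, the pattern applied by this commit can be sketched outside the binder code: allocate the spare structure before taking the lock, let the locked helper consume it only when it is actually needed, and unconditionally free whatever is left over afterwards. Below is a minimal, self-contained userspace illustration with hypothetical names (node, insert_locked, list_lock); it is not the kernel implementation, only the shape of the technique.

/*
 * Minimal userspace sketch of the "preallocate outside the lock" pattern.
 * All names here are hypothetical and not part of the binder code.
 */
#include <pthread.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int val;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *list_head;

/* Runs with list_lock held; consumes *spare only when a new entry is needed. */
static void insert_locked(struct node **spare, int val)
{
	if (!list_head || list_head->val != val) {
		(*spare)->val = val;
		(*spare)->next = list_head;
		list_head = *spare;
		*spare = NULL;	/* consumed: caller must not free it */
	}
}

int insert(int val)
{
	/* Preallocate before taking the lock so no allocation (which may
	 * sleep or fail) happens while the lock is held. */
	struct node *spare = calloc(1, sizeof(*spare));

	if (!spare)
		return -1;

	pthread_mutex_lock(&list_lock);
	insert_locked(&spare, val);
	pthread_mutex_unlock(&list_lock);

	/* Discard the preallocation if it was not used; free(NULL) is a no-op. */
	free(spare);
	return 0;
}

Keeping the allocation outside the critical section is what later allows the lock to become a spinlock, since nothing that can sleep remains inside it.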
@@ -395,24 +395,27 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc)
 	return false;
 }
 
+/* Callers preallocate @new_buffer, it is freed by this function if unused */
 static struct binder_buffer *binder_alloc_new_buf_locked(
 				struct binder_alloc *alloc,
+				struct binder_buffer *new_buffer,
 				size_t size,
 				int is_async)
 {
 	struct rb_node *n = alloc->free_buffers.rb_node;
-	struct binder_buffer *buffer;
-	size_t buffer_size;
 	struct rb_node *best_fit = NULL;
+	struct binder_buffer *buffer;
 	unsigned long has_page_addr;
 	unsigned long end_page_addr;
+	size_t buffer_size;
 	int ret;
 
 	if (is_async && alloc->free_async_space < size) {
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
			      alloc->pid, size);
-		return ERR_PTR(-ENOSPC);
+		buffer = ERR_PTR(-ENOSPC);
+		goto out;
 	}
 
 	while (n) {
@@ -436,7 +439,8 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 			   "%d: binder_alloc_buf size %zd failed, no address space\n",
 			   alloc->pid, size);
 		debug_no_space_locked(alloc);
-		return ERR_PTR(-ENOSPC);
+		buffer = ERR_PTR(-ENOSPC);
+		goto out;
 	}
 
 	if (n == NULL) {
@@ -455,22 +459,17 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 		end_page_addr = has_page_addr;
 	ret = binder_allocate_page_range(alloc, PAGE_ALIGN(buffer->user_data),
 					 end_page_addr);
-	if (ret)
-		return ERR_PTR(ret);
+	if (ret) {
+		buffer = ERR_PTR(ret);
+		goto out;
+	}
 
 	if (buffer_size != size) {
-		struct binder_buffer *new_buffer;
-
-		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
-		if (!new_buffer) {
-			pr_err("%s: %d failed to alloc new buffer struct\n",
-			       __func__, alloc->pid);
-			goto err_alloc_buf_struct_failed;
-		}
 		new_buffer->user_data = buffer->user_data + size;
 		list_add(&new_buffer->entry, &buffer->entry);
 		new_buffer->free = 1;
 		binder_insert_free_buffer(alloc, new_buffer);
+		new_buffer = NULL;
 	}
 
 	rb_erase(best_fit, &alloc->free_buffers);
@@ -491,12 +490,10 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 		buffer->oneway_spam_suspect = true;
 	}
 
+out:
+	/* Discard possibly unused new_buffer */
+	kfree(new_buffer);
 	return buffer;
-
-err_alloc_buf_struct_failed:
-	binder_free_page_range(alloc, PAGE_ALIGN(buffer->user_data),
-			       end_page_addr);
-	return ERR_PTR(-ENOMEM);
 }
 
 /* Calculate the sanitized total size, returns 0 for invalid request */
@@ -542,7 +539,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 					   size_t extra_buffers_size,
 					   int is_async)
 {
-	struct binder_buffer *buffer;
+	struct binder_buffer *buffer, *next;
 	size_t size;
 
 	/* Check binder_alloc is fully initialized */
@@ -562,8 +559,13 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 		return ERR_PTR(-EINVAL);
 	}
 
+	/* Preallocate the next buffer */
+	next = kzalloc(sizeof(*next), GFP_KERNEL);
+	if (!next)
+		return ERR_PTR(-ENOMEM);
+
 	mutex_lock(&alloc->mutex);
-	buffer = binder_alloc_new_buf_locked(alloc, size, is_async);
+	buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
 	if (IS_ERR(buffer)) {
 		mutex_unlock(&alloc->mutex);
 		goto out;