Mirror of https://github.com/torvalds/linux.git
binder: rename lru shrinker utilities
Now that the page allocation step is done separately, we should rename the
binder_free_page_range() and binder_allocate_page_range() functions to
provide a more accurate description of what they do. Let's borrow the
freelist concept used in other parts of the kernel for this.

No functional change here.

Signed-off-by: Carlos Llamas <cmllamas@google.com>
Reviewed-by: Alice Ryhl <aliceryhl@google.com>
Link: https://lore.kernel.org/r/20231201172212.1813387-23-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit ea9cdbf0c7
parent de0e657312
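The rename is purely cosmetic: pages that no longer back a live buffer are parked on a global list_lru so the shrinker can reclaim them later, and are taken back off that list when a new buffer reuses the range; "freelist" describes that role better than "free/allocate page range". As a rough illustration of the pattern (not part of the patch), here is a minimal sketch using hypothetical names (my_freelist, my_page, my_freelist_add/del); the two-argument list_lru_add()/list_lru_del() calls match the kernel version this patch targets:

/*
 * Minimal sketch (not from this patch) of the freelist pattern the renamed
 * helpers implement: pages that no longer back a live buffer are parked on
 * a global list_lru so the shrinker can reclaim them under memory pressure,
 * and are pulled back off the list when a buffer reuses the range.
 * All names here are hypothetical.
 */
#include <linux/list_lru.h>
#include <linux/mm.h>
#include <linux/bug.h>

static struct list_lru my_freelist;	/* plays the role of binder_freelist */

struct my_page {
	struct page *page_ptr;		/* backing page, reclaimable while parked */
	struct list_head lru;		/* entry in my_freelist */
};

/* Buffer freed: keep the page but make it reclaimable ("freelist add"). */
static void my_freelist_add(struct my_page *p)
{
	/* list_lru_add() returns false if the item was already on a list. */
	WARN_ON(!list_lru_add(&my_freelist, &p->lru));
}

/* Range about to be reused: take the page back off the freelist ("freelist del"). */
static void my_freelist_del(struct my_page *p)
{
	WARN_ON(!list_lru_del(&my_freelist, &p->lru));
}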
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -26,7 +26,7 @@
 #include "binder_alloc.h"
 #include "binder_trace.h"
 
-struct list_lru binder_alloc_lru;
+struct list_lru binder_freelist;
 
 static DEFINE_MUTEX(binder_alloc_mmap_lock);
 
@@ -190,8 +190,8 @@ binder_get_installed_page(struct binder_lru_page *lru_page)
 	return smp_load_acquire(&lru_page->page_ptr);
 }
 
-static void binder_free_page_range(struct binder_alloc *alloc,
+static void binder_lru_freelist_add(struct binder_alloc *alloc,
				    unsigned long start, unsigned long end)
 {
 	struct binder_lru_page *page;
 	unsigned long page_addr;
@@ -210,7 +210,7 @@ static void binder_free_page_range(struct binder_alloc *alloc,
 
 		trace_binder_free_lru_start(alloc, index);
 
-		ret = list_lru_add(&binder_alloc_lru, &page->lru);
+		ret = list_lru_add(&binder_freelist, &page->lru);
 		WARN_ON(!ret);
 
 		trace_binder_free_lru_end(alloc, index);
@@ -299,14 +299,14 @@ static int binder_install_buffer_pages(struct binder_alloc *alloc,
 }
 
 /* The range of pages should exclude those shared with other buffers */
-static void binder_allocate_page_range(struct binder_alloc *alloc,
+static void binder_lru_freelist_del(struct binder_alloc *alloc,
				    unsigned long start, unsigned long end)
 {
 	struct binder_lru_page *page;
 	unsigned long page_addr;
 
 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			   "%d: allocate pages %lx-%lx\n",
+			   "%d: pages %lx-%lx\n",
			   alloc->pid, start, end);
 
 	trace_binder_update_page_range(alloc, true, start, end);
@@ -321,7 +321,7 @@ static void binder_allocate_page_range(struct binder_alloc *alloc,
 		if (page->page_ptr) {
 			trace_binder_alloc_lru_start(alloc, index);
 
-			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
+			on_lru = list_lru_del(&binder_freelist, &page->lru);
 			WARN_ON(!on_lru);
 
 			trace_binder_alloc_lru_end(alloc, index);
@@ -504,8 +504,8 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	end_page_addr = PAGE_ALIGN(buffer->user_data + size);
 	if (end_page_addr > has_page_addr)
 		end_page_addr = has_page_addr;
-	binder_allocate_page_range(alloc, PAGE_ALIGN(buffer->user_data),
+	binder_lru_freelist_del(alloc, PAGE_ALIGN(buffer->user_data),
				end_page_addr);
 
 	rb_erase(&buffer->rb_node, &alloc->free_buffers);
 	buffer->free = 0;
@@ -671,8 +671,8 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
				   alloc->pid, buffer->user_data,
				   prev->user_data,
				   next ? next->user_data : 0);
-		binder_free_page_range(alloc, buffer_start_page(buffer),
+		binder_lru_freelist_add(alloc, buffer_start_page(buffer),
					buffer_start_page(buffer) + PAGE_SIZE);
 	}
 	list_del(&buffer->entry);
 	kfree(buffer);
@@ -706,8 +706,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
			      alloc->pid, size, alloc->free_async_space);
 	}
 
-	binder_free_page_range(alloc, PAGE_ALIGN(buffer->user_data),
+	binder_lru_freelist_add(alloc, PAGE_ALIGN(buffer->user_data),
			       (buffer->user_data + buffer_size) & PAGE_MASK);
 
 	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
 	buffer->free = 1;
@@ -953,7 +953,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 			if (!alloc->pages[i].page_ptr)
 				continue;
 
-			on_lru = list_lru_del(&binder_alloc_lru,
+			on_lru = list_lru_del(&binder_freelist,
					      &alloc->pages[i].lru);
 			page_addr = alloc->buffer + i * PAGE_SIZE;
 			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
@@ -1152,13 +1152,13 @@ err_get_alloc_mutex_failed:
 static unsigned long
 binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
-	return list_lru_count(&binder_alloc_lru);
+	return list_lru_count(&binder_freelist);
 }
 
 static unsigned long
 binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-	return list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+	return list_lru_walk(&binder_freelist, binder_alloc_free_page,
			     NULL, sc->nr_to_scan);
 }
 
@@ -1184,13 +1184,13 @@ int binder_alloc_shrinker_init(void)
 {
 	int ret;
 
-	ret = list_lru_init(&binder_alloc_lru);
+	ret = list_lru_init(&binder_freelist);
 	if (ret)
 		return ret;
 
 	binder_shrinker = shrinker_alloc(0, "android-binder");
 	if (!binder_shrinker) {
-		list_lru_destroy(&binder_alloc_lru);
+		list_lru_destroy(&binder_freelist);
 		return -ENOMEM;
 	}
 
@@ -1205,7 +1205,7 @@ int binder_alloc_shrinker_init(void)
 void binder_alloc_shrinker_exit(void)
 {
 	shrinker_free(binder_shrinker);
-	list_lru_destroy(&binder_alloc_lru);
+	list_lru_destroy(&binder_freelist);
 }
 
 /**
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -15,7 +15,7 @@
 #include <linux/list_lru.h>
 #include <uapi/linux/android/binder.h>
 
-extern struct list_lru binder_alloc_lru;
+extern struct list_lru binder_freelist;
 struct binder_transaction;
 
 /**
@@ -61,7 +61,7 @@ struct binder_buffer {
 /**
  * struct binder_lru_page - page object used for binder shrinker
  * @page_ptr: pointer to physical page in mmap'd space
- * @lru: entry in binder_alloc_lru
+ * @lru: entry in binder_freelist
  * @alloc: binder_alloc for a proc
  */
 struct binder_lru_page {
--- a/drivers/android/binder_alloc_selftest.c
+++ b/drivers/android/binder_alloc_selftest.c
@@ -158,8 +158,8 @@ static void binder_selftest_free_page(struct binder_alloc *alloc)
 	int i;
 	unsigned long count;
 
-	while ((count = list_lru_count(&binder_alloc_lru))) {
-		list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+	while ((count = list_lru_count(&binder_freelist))) {
+		list_lru_walk(&binder_freelist, binder_alloc_free_page,
			      NULL, count);
 	}
 
@@ -183,7 +183,7 @@ static void binder_selftest_alloc_free(struct binder_alloc *alloc,
 
 	/* Allocate from lru. */
 	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
-	if (list_lru_count(&binder_alloc_lru))
+	if (list_lru_count(&binder_freelist))
 		pr_err("lru list should be empty but is not\n");
 
 	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
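For completeness, the shrinker hunks above (@@ -1152 and @@ -1184) show the other half of the freelist lifecycle: the shrinker counts and walks binder_freelist to reclaim parked pages. Below is a minimal, hypothetical sketch of that wiring, reusing the illustrative my_freelist name (redeclared here so the sketch stands alone) and the shrinker_alloc()/shrinker_register() API visible in the patch; my_reclaim_one is a stand-in for the real reclaim callback (binder_alloc_free_page in binder), its body only skips items, and its four-argument signature is an assumption about the kernel version this patch targets:

#include <linux/list_lru.h>
#include <linux/shrinker.h>

static struct list_lru my_freelist;	/* illustrative, stands in for binder_freelist */
static struct shrinker *my_shrinker;

/*
 * Stand-in for a real reclaim callback such as binder_alloc_free_page():
 * a real implementation would isolate the item, unmap and free the page,
 * and return LRU_REMOVED. This placeholder just skips every item.
 * The four-argument signature is assumed, not taken from the patch.
 */
static enum lru_status my_reclaim_one(struct list_head *item,
				      struct list_lru_one *lru,
				      spinlock_t *lock, void *cb_arg)
{
	return LRU_SKIP;
}

static unsigned long my_shrink_count(struct shrinker *shrink,
				     struct shrink_control *sc)
{
	/* How many parked pages could be reclaimed right now. */
	return list_lru_count(&my_freelist);
}

static unsigned long my_shrink_scan(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	/* Walk up to nr_to_scan parked pages, letting the callback reclaim them. */
	return list_lru_walk(&my_freelist, my_reclaim_one, NULL, sc->nr_to_scan);
}

static int my_freelist_shrinker_init(void)
{
	int ret = list_lru_init(&my_freelist);

	if (ret)
		return ret;

	my_shrinker = shrinker_alloc(0, "my-freelist");
	if (!my_shrinker) {
		list_lru_destroy(&my_freelist);
		return -ENOMEM;
	}

	my_shrinker->count_objects = my_shrink_count;
	my_shrinker->scan_objects = my_shrink_scan;
	shrinker_register(my_shrinker);

	return 0;
}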