staging: ion: Avoid using rt_mutexes directly
RT_MUTEXES can be configured out of the kernel, causing compile
problems with ION.

To quote Colin:
"rt_mutexes were added with the deferred freeing feature. Heaps need
to return zeroed memory to userspace, but zeroing the memory on every
allocation was causing performance issues. We added a SCHED_IDLE
thread to zero memory in the background after freeing, but locking the
heap from the SCHED_IDLE thread might block a high-priority allocation
thread for a long time.

The lock is only used to protect the heap's free_list and
free_list_size members, and is not held for any long or sleeping
operations. Converting to a spinlock should prevent priority inversion
without using the rt_mutex. I'd also rename it to free_lock so it
doesn't get used as a general heap lock."

Thus this patch converts the rt_mutex usage to a spinlock and renames
the lock to free_lock to make its purpose clear.

I also had to change a bit of the logic in ion_heap_freelist_drain()
to safely avoid list corruption; see the annotated sketch below.

Acked-by: Colin Cross <ccross@android.com>
Cc: Android Kernel Team <kernel-team@android.com>
Reported-by: Jim Davis <jim.epost@gmail.com>
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
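For clarity, here is the reshaped ion_heap_freelist_drain() in one
piece, with comments on why the iteration style had to change. This is
a consolidated sketch assembled from the hunks below, with explanatory
comments added; it is not claimed to be the verbatim driver source.

size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
	struct ion_buffer *buffer;
	size_t total_drained = 0;

	if (ion_heap_freelist_size(heap) == 0)
		return 0;

	spin_lock(&heap->free_lock);
	if (size == 0)
		size = heap->free_list_size;

	/*
	 * Re-fetch the first entry on each pass rather than using
	 * list_for_each_entry_safe(): the _safe iterator caches a
	 * "next" pointer that another thread could remove and free
	 * while the lock is dropped below, corrupting the walk.
	 */
	while (!list_empty(&heap->free_list)) {
		if (total_drained >= size)
			break;
		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		total_drained += buffer->size;
		/*
		 * Destroying a buffer can be a long, possibly sleeping
		 * operation, so it must not run under the non-sleeping
		 * spinlock; drop the lock and retake it afterwards.
		 */
		spin_unlock(&heap->free_lock);
		ion_buffer_destroy(buffer);
		spin_lock(&heap->free_lock);
	}
	spin_unlock(&heap->free_lock);

	return total_drained;
}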
parent 7dcfee65a1
commit 6a72a700f4
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -160,10 +160,10 @@ int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
 
 void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
 {
-	rt_mutex_lock(&heap->lock);
+	spin_lock(&heap->free_lock);
 	list_add(&buffer->list, &heap->free_list);
 	heap->free_list_size += buffer->size;
-	rt_mutex_unlock(&heap->lock);
+	spin_unlock(&heap->free_lock);
 	wake_up(&heap->waitqueue);
 }
 
@@ -171,34 +171,38 @@ size_t ion_heap_freelist_size(struct ion_heap *heap)
 {
 	size_t size;
 
-	rt_mutex_lock(&heap->lock);
+	spin_lock(&heap->free_lock);
 	size = heap->free_list_size;
-	rt_mutex_unlock(&heap->lock);
+	spin_unlock(&heap->free_lock);
 
 	return size;
 }
 
 size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
 {
-	struct ion_buffer *buffer, *tmp;
+	struct ion_buffer *buffer;
 	size_t total_drained = 0;
 
 	if (ion_heap_freelist_size(heap) == 0)
 		return 0;
 
-	rt_mutex_lock(&heap->lock);
+	spin_lock(&heap->free_lock);
 	if (size == 0)
 		size = heap->free_list_size;
 
-	list_for_each_entry_safe(buffer, tmp, &heap->free_list, list) {
+	while (!list_empty(&heap->free_list)) {
 		if (total_drained >= size)
 			break;
+		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
+					  list);
 		list_del(&buffer->list);
 		heap->free_list_size -= buffer->size;
 		total_drained += buffer->size;
+		spin_unlock(&heap->free_lock);
 		ion_buffer_destroy(buffer);
+		spin_lock(&heap->free_lock);
 	}
-	rt_mutex_unlock(&heap->lock);
+	spin_unlock(&heap->free_lock);
 
 	return total_drained;
 }
@@ -213,16 +217,16 @@ static int ion_heap_deferred_free(void *data)
 		wait_event_freezable(heap->waitqueue,
 				     ion_heap_freelist_size(heap) > 0);
 
-		rt_mutex_lock(&heap->lock);
+		spin_lock(&heap->free_lock);
 		if (list_empty(&heap->free_list)) {
-			rt_mutex_unlock(&heap->lock);
+			spin_unlock(&heap->free_lock);
 			continue;
 		}
 		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
 		list_del(&buffer->list);
 		heap->free_list_size -= buffer->size;
-		rt_mutex_unlock(&heap->lock);
+		spin_unlock(&heap->free_lock);
 		ion_buffer_destroy(buffer);
 	}
 
@@ -235,7 +239,7 @@ int ion_heap_init_deferred_free(struct ion_heap *heap)
 
 	INIT_LIST_HEAD(&heap->free_list);
 	heap->free_list_size = 0;
-	rt_mutex_init(&heap->lock);
+	spin_lock_init(&heap->free_lock);
 	init_waitqueue_head(&heap->waitqueue);
 	heap->task = kthread_run(ion_heap_deferred_free, heap,
				 "%s", heap->name);
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -159,7 +159,7 @@ struct ion_heap {
 	struct shrinker shrinker;
 	struct list_head free_list;
 	size_t free_list_size;
-	struct rt_mutex lock;
+	spinlock_t free_lock;
 	wait_queue_head_t waitqueue;
 	struct task_struct *task;
 	int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
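For context, deferred freeing is opt-in per heap: ion_heap_init_deferred_free()
is what spawns the SCHED_IDLE freeing thread and, after this patch, runs
spin_lock_init(&heap->free_lock). A minimal sketch of how a heap of this era
opted in, assuming the ION_HEAP_FLAG_DEFER_FREE flag checked by
ion_device_add_heap(); example_heap_create and example_heap_ops are
hypothetical names for illustration:

#include <linux/err.h>
#include <linux/slab.h>
#include "ion_priv.h"

/* Hypothetical, empty ops table for illustration only. */
static struct ion_heap_ops example_heap_ops = {
	/* .allocate, .free, etc. would be filled in by a real heap */
};

static struct ion_heap *example_heap_create(void)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);

	heap->ops = &example_heap_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM;
	/*
	 * Opting in: with this flag set, ion_device_add_heap() calls
	 * ion_heap_init_deferred_free(), which initializes free_list,
	 * free_lock and the background freeing thread.
	 */
	heap->flags = ION_HEAP_FLAG_DEFER_FREE;
	return heap;
}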