mm: use mmget_not_zero() helper
We already have the helper, we can convert the rest of the kernel
mechanically using:

  git grep -l 'atomic_inc_not_zero.*mm_users' | xargs sed -i 's/atomic_inc_not_zero(&\(.*\)->mm_users)/mmget_not_zero\(\1\)/'

This is needed for a later patch that hooks into the helper, but might
be a worthwhile cleanup on its own.

Link: http://lkml.kernel.org/r/20161218123229.22952-3-vegard.nossum@oracle.com
Signed-off-by: Vegard Nossum <vegard.nossum@oracle.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 388f793455
parent 3fce371bfa
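For reference, mmget_not_zero() is a thin wrapper over the open-coded
atomic that this patch replaces, which is why the conversion can be done
with a single sed expression. A minimal sketch follows; the wrapper
definition matches include/linux/sched.h of this era, while the caller
below it is illustrative only, not code from this patch:

  /*
   * Take a reference on mm->mm_users unless it has already dropped
   * to zero, i.e. unless the address space is being torn down.
   * Returns true if the reference was taken.
   */
  static inline bool mmget_not_zero(struct mm_struct *mm)
  {
          return atomic_inc_not_zero(&mm->mm_users);
  }

  /* Illustrative caller: pin the mm, use it, drop it with mmput(). */
  if (!mmget_not_zero(mm))
          return;                 /* mm already defunct, nothing to do */
  down_read(&mm->mmap_sem);
  /* ... operate on the address space ... */
  up_read(&mm->mmap_sem);
  mmput(mm);

Every call site converted below already follows this pattern: the
open-coded atomic_inc_not_zero(&mm->mm_users) guard simply becomes the
named helper, with no change in behavior.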
@@ -507,7 +507,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 			flags |= FOLL_WRITE;
 
 		ret = -EFAULT;
-		if (atomic_inc_not_zero(&mm->mm_users)) {
+		if (mmget_not_zero(mm)) {
 			down_read(&mm->mmap_sem);
 			while (pinned < npages) {
 				ret = get_user_pages_remote
@@ -579,7 +579,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 		if (!svm->mm)
 			goto bad_req;
 		/* If the mm is already defunct, don't handle faults. */
-		if (!atomic_inc_not_zero(&svm->mm->mm_users))
+		if (!mmget_not_zero(svm->mm))
 			goto bad_req;
 		down_read(&svm->mm->mmap_sem);
 		vma = find_extend_vma(svm->mm, address);
@@ -813,7 +813,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
 		return -ENOMEM;
 
 	copied = 0;
-	if (!atomic_inc_not_zero(&mm->mm_users))
+	if (!mmget_not_zero(mm))
 		goto free;
 
 	/* Maybe we should limit FOLL_FORCE to actual ptrace users? */
@@ -921,7 +921,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
 		return -ENOMEM;
 
 	ret = 0;
-	if (!atomic_inc_not_zero(&mm->mm_users))
+	if (!mmget_not_zero(mm))
 		goto free;
 
 	down_read(&mm->mmap_sem);
@@ -167,7 +167,7 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
 		return ERR_PTR(-ESRCH);
 
 	mm = priv->mm;
-	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
+	if (!mm || !mmget_not_zero(mm))
 		return NULL;
 
 	down_read(&mm->mmap_sem);
@@ -1352,7 +1352,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 	unsigned long end_vaddr;
 	int ret = 0, copied = 0;
 
-	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
+	if (!mm || !mmget_not_zero(mm))
 		goto out;
 
 	ret = -EINVAL;
@@ -219,7 +219,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
 		return ERR_PTR(-ESRCH);
 
 	mm = priv->mm;
-	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
+	if (!mm || !mmget_not_zero(mm))
 		return NULL;
 
 	down_read(&mm->mmap_sem);
@@ -747,7 +747,7 @@ build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
 			continue;
 		}
 
-		if (!atomic_inc_not_zero(&vma->vm_mm->mm_users))
+		if (!mmget_not_zero(vma->vm_mm))
 			continue;
 
 		info = prev;
@@ -1763,7 +1763,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
 	while (swap_count(*swap_map) && !retval &&
 			(p = p->next) != &start_mm->mmlist) {
 		mm = list_entry(p, struct mm_struct, mmlist);
-		if (!atomic_inc_not_zero(&mm->mm_users))
+		if (!mmget_not_zero(mm))
 			continue;
 		spin_unlock(&mmlist_lock);
 		mmput(prev_mm);