mm, hugetlb: use vma_resv_map() map types
Until now, we have obtained a resv_map in one of two ways, depending on the
mapping type. This makes the code messy and hard to read. Unify it.

[davidlohr@hp.com: code cleanups]
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Davidlohr Bueso <davidlohr@hp.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f031dd274c
commit 4e35f48385

mm/hugetlb.c | 95 (1 changed file, 45 additions, 50 deletions)
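Before the diff itself, a minimal user-space sketch of the unification (the stub types and main() are invented for illustration; only vma_resv_map(), inode_resv_map(), VM_MAYSHARE, and the private_data field correspond to names in the patch):

#include <stdio.h>

#define VM_MAYSHARE 0x1UL

/* Invented stand-ins for the kernel structures. */
struct resv_map { const char *name; };
struct inode_stub { struct resv_map *private_data; };
struct vma_stub {
	unsigned long vm_flags;
	struct inode_stub *inode;	/* shared case: the backing file's inode */
	struct resv_map *vm_private;	/* private case: stashed in the VMA */
};

/* Models the patched vma_resv_map(): one entry point for both mapping types. */
static struct resv_map *vma_resv_map(struct vma_stub *vma)
{
	if (vma->vm_flags & VM_MAYSHARE)
		return vma->inode->private_data;	/* the inode_resv_map() path */
	return vma->vm_private;			/* the get_vma_private_data() path */
}

int main(void)
{
	struct resv_map shared_map = { "shared" }, private_map = { "private" };
	struct inode_stub inode = { &shared_map };
	struct vma_stub shared = { VM_MAYSHARE, &inode, NULL };
	struct vma_stub private = { 0, NULL, &private_map };

	/* Callers no longer branch on the mapping type themselves. */
	printf("%s\n", vma_resv_map(&shared)->name);
	printf("%s\n", vma_resv_map(&private)->name);
	return 0;
}

That is the whole patch in miniature: every caller that used to open-code one of the two lookups now goes through the helper.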
@@ -419,13 +419,24 @@ void resv_map_release(struct kref *ref)
 	kfree(resv_map);
 }
 
+static inline struct resv_map *inode_resv_map(struct inode *inode)
+{
+	return inode->i_mapping->private_data;
+}
+
 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
 {
 	VM_BUG_ON(!is_vm_hugetlb_page(vma));
-	if (!(vma->vm_flags & VM_MAYSHARE))
+	if (vma->vm_flags & VM_MAYSHARE) {
+		struct address_space *mapping = vma->vm_file->f_mapping;
+		struct inode *inode = mapping->host;
+
+		return inode_resv_map(inode);
+
+	} else {
 		return (struct resv_map *)(get_vma_private_data(vma) &
 					~HPAGE_RESV_MASK);
-	return NULL;
+	}
 }
 
 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
@@ -1167,48 +1178,34 @@ static void return_unused_surplus_pages(struct hstate *h,
 static long vma_needs_reservation(struct hstate *h,
 			struct vm_area_struct *vma, unsigned long addr)
 {
-	struct address_space *mapping = vma->vm_file->f_mapping;
-	struct inode *inode = mapping->host;
+	struct resv_map *resv;
+	pgoff_t idx;
+	long chg;
 
-	if (vma->vm_flags & VM_MAYSHARE) {
-		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
-		struct resv_map *resv = inode->i_mapping->private_data;
-
-		return region_chg(resv, idx, idx + 1);
-
-	} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
+	resv = vma_resv_map(vma);
+	if (!resv)
 		return 1;
 
-	} else  {
-		long err;
-		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
-		struct resv_map *resv = vma_resv_map(vma);
+	idx = vma_hugecache_offset(h, vma, addr);
+	chg = region_chg(resv, idx, idx + 1);
 
-		err = region_chg(resv, idx, idx + 1);
-		if (err < 0)
-			return err;
-		return 0;
-	}
+	if (vma->vm_flags & VM_MAYSHARE)
+		return chg;
+	else
+		return chg < 0 ? chg : 0;
 }
 static void vma_commit_reservation(struct hstate *h,
 			struct vm_area_struct *vma, unsigned long addr)
 {
-	struct address_space *mapping = vma->vm_file->f_mapping;
-	struct inode *inode = mapping->host;
+	struct resv_map *resv;
+	pgoff_t idx;
 
-	if (vma->vm_flags & VM_MAYSHARE) {
-		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
-		struct resv_map *resv = inode->i_mapping->private_data;
-
-		region_add(resv, idx, idx + 1);
+	resv = vma_resv_map(vma);
+	if (!resv)
+		return;
 
-	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
-		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
-		struct resv_map *resv = vma_resv_map(vma);
-
-		/* Mark this page used in the map. */
-		region_add(resv, idx, idx + 1);
-	}
+	idx = vma_hugecache_offset(h, vma, addr);
+	region_add(resv, idx, idx + 1);
 }
 
 static struct page *alloc_huge_page(struct vm_area_struct *vma,
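The rewritten vma_needs_reservation() keeps the old return convention while collapsing three branches into one path. A sketch of that convention (the helper name and parameters are invented; chg stands for a region_chg() result):

#include <stdio.h>

/*
 * Models the return convention of the patched vma_needs_reservation().
 * Illustrative only: 'has_resv' stands for vma_resv_map() != NULL and
 * 'chg' for the value region_chg() would return.
 */
static long needs_reservation(int may_share, int has_resv, long chg)
{
	if (!has_resv)
		return 1;		/* private, non-owner VMA: charge one page */
	if (may_share)
		return chg;		/* shared: pages missing from the region map */
	return chg < 0 ? chg : 0;	/* private owner: reserved at mmap() time */
}

int main(void)
{
	printf("%ld\n", needs_reservation(0, 0, 0));	/* 1 */
	printf("%ld\n", needs_reservation(1, 1, 1));	/* 1 */
	printf("%ld\n", needs_reservation(0, 1, 1));	/* 0 */
	printf("%ld\n", needs_reservation(0, 1, -12));	/* -12: error propagated */
	return 0;
}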
@@ -2271,7 +2268,7 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
 	 * after this open call completes. It is therefore safe to take a
 	 * new reference here without additional locking.
 	 */
-	if (resv)
+	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
 		kref_get(&resv->refs);
 }
 
@@ -2280,23 +2277,21 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
 	struct hstate *h = hstate_vma(vma);
 	struct resv_map *resv = vma_resv_map(vma);
 	struct hugepage_subpool *spool = subpool_vma(vma);
-	unsigned long reserve;
-	unsigned long start;
-	unsigned long end;
+	unsigned long reserve, start, end;
 
-	if (resv) {
-		start = vma_hugecache_offset(h, vma, vma->vm_start);
-		end = vma_hugecache_offset(h, vma, vma->vm_end);
+	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
+		return;
 
-		reserve = (end - start) -
-			region_count(resv, start, end);
+	start = vma_hugecache_offset(h, vma, vma->vm_start);
+	end = vma_hugecache_offset(h, vma, vma->vm_end);
 
-		kref_put(&resv->refs, resv_map_release);
+	reserve = (end - start) - region_count(resv, start, end);
 
-		if (reserve) {
-			hugetlb_acct_memory(h, -reserve);
-			hugepage_subpool_put_pages(spool, reserve);
-		}
+	kref_put(&resv->refs, resv_map_release);
+
+	if (reserve) {
+		hugetlb_acct_memory(h, -reserve);
+		hugepage_subpool_put_pages(spool, reserve);
 	}
 }
 
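With vma_resv_map() now also returning the inode's map for shared VMAs, the open/close pair above may only take or drop a reference when the VMA actually owns the map; for shared mappings the inode, not the VMA, keeps the resv_map alive. A simplified model of that rule (a plain int stands in for the kernel's kref; the function names are stand-ins for hugetlb_vm_op_open()/hugetlb_vm_op_close()):

#include <stdio.h>

struct resv_map_stub { int refs; };

/* op_open: take a reference only if this VMA owns the reservation map. */
static void op_open(struct resv_map_stub *resv, int is_owner)
{
	if (resv && is_owner)
		resv->refs++;
}

/* op_close: non-owners (including all shared mappings, whose map is
 * pinned by the inode) return early and drop nothing. */
static void op_close(struct resv_map_stub *resv, int is_owner)
{
	if (!resv || !is_owner)
		return;
	if (--resv->refs == 0)
		printf("resv_map released\n");
}

int main(void)
{
	struct resv_map_stub owner_map = { 1 };	/* created with one reference */

	op_close(&owner_map, 0);	/* shared/non-owner close: no effect */
	op_close(&owner_map, 1);	/* owner close: refs -> 0, released */
	return 0;
}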
@@ -3189,7 +3184,7 @@ int hugetlb_reserve_pages(struct inode *inode,
 	 * called to make the mapping read-write. Assume !vma is a shm mapping
 	 */
 	if (!vma || vma->vm_flags & VM_MAYSHARE) {
-		resv_map = inode->i_mapping->private_data;
+		resv_map = inode_resv_map(inode);
 
 		chg = region_chg(resv_map, from, to);
 
@@ -3248,7 +3243,7 @@ out_err:
 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
 {
 	struct hstate *h = hstate_inode(inode);
-	struct resv_map *resv_map = inode->i_mapping->private_data;
+	struct resv_map *resv_map = inode_resv_map(inode);
 	long chg = 0;
 	struct hugepage_subpool *spool = subpool_inode(inode);
 