mm, fs: introduce mapping_gfp_constraint()
There are many places which use mapping_gfp_mask to restrict a more
generic gfp mask which would be used for allocations which are not
directly related to the page cache but are performed in the same
context.

Let's introduce a helper function which makes the restriction explicit
and easier to track.  This patch doesn't introduce any functional
changes.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Michal Hocko <mhocko@suse.com>
Suggested-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit c62d25556b
parent 8990332760
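For readers skimming the diff, here is a minimal sketch of what the conversion means at a typical call site. The wrapper function name below is illustrative only and not part of the patch; the helper itself is introduced in the include/linux/pagemap.h hunk further down.

#include <linux/pagemap.h>	/* mapping_gfp_mask(), mapping_gfp_constraint() */

/* Illustrative wrapper, not from the patch. */
static gfp_t example_readahead_gfp(struct address_space *mapping)
{
	/* Old spelling, repeated across many call sites: */
	gfp_t old_gfp = GFP_KERNEL & mapping_gfp_mask(mapping);

	/* New spelling: same bits, explicit intent: */
	gfp_t new_gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);

	WARN_ON(old_gfp != new_gfp);	/* identical by construction */
	return new_gfp;
}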
@@ -491,7 +491,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
 		 * so shmem can relocate pages during swapin if required.
 		 */
-		BUG_ON((mapping_gfp_mask(mapping) & __GFP_DMA32) &&
+		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
 				(page_to_pfn(p) >= 0x00100000UL));
 	}

@@ -2214,9 +2214,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	 * Fail silently without starting the shrinker
 	 */
 	mapping = file_inode(obj->base.filp)->i_mapping;
-	gfp = mapping_gfp_mask(mapping);
+	gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
 	gfp |= __GFP_NORETRY | __GFP_NOWARN;
-	gfp &= ~(__GFP_IO | __GFP_RECLAIM);
 	sg = st->sgl;
 	st->nents = 0;
 	for (i = 0; i < page_count; i++) {

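Note that the i915 hunk above is slightly more than a mechanical substitution: the old code set __GFP_NORETRY | __GFP_NOWARN before clearing __GFP_IO | __GFP_RECLAIM, while the new code clears first (via the helper) and sets afterwards. The result is unchanged because the two flag sets share no bits; a sketch of the equivalence, not part of the patch:

#include <linux/pagemap.h>

/* Illustrative only: set-then-clear equals clear-then-set for disjoint bits. */
static void i915_gfp_order_equivalence(struct address_space *mapping)
{
	gfp_t old_order, new_order;

	old_order = mapping_gfp_mask(mapping);
	old_order |= __GFP_NORETRY | __GFP_NOWARN;
	old_order &= ~(__GFP_IO | __GFP_RECLAIM);	/* clear last */

	new_order = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
	new_order |= __GFP_NORETRY | __GFP_NOWARN;	/* set last */

	/*
	 * (__GFP_NORETRY | __GFP_NOWARN) and (__GFP_IO | __GFP_RECLAIM) are
	 * disjoint bit sets, so old_order == new_order always holds.
	 */
	WARN_ON(old_order != new_order);
}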
@@ -482,13 +482,12 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 			goto next;
 		}
 
-		page = __page_cache_alloc(mapping_gfp_mask(mapping) &
-								~__GFP_FS);
+		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
+								 ~__GFP_FS));
 		if (!page)
 			break;
 
-		if (add_to_page_cache_lru(page, mapping, pg_index,
-								GFP_NOFS)) {
+		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
 			page_cache_release(page);
 			goto next;
 		}

@@ -3316,7 +3316,7 @@ static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info)
 
 static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
 {
-	return mapping_gfp_mask(mapping) & ~__GFP_FS;
+	return mapping_gfp_constraint(mapping, ~__GFP_FS);
 }
 
 /* extent-tree.c */

@@ -85,8 +85,8 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
 	}
 
 	mapping_set_gfp_mask(inode->i_mapping,
-			mapping_gfp_mask(inode->i_mapping) &
-			~(__GFP_FS | __GFP_HIGHMEM));
+			mapping_gfp_constraint(inode->i_mapping,
+			~(__GFP_FS | __GFP_HIGHMEM)));
 
 	return inode;
 }

@@ -999,7 +999,7 @@ grow_dev_page(struct block_device *bdev, sector_t block,
 	int ret = 0;		/* Will call free_more_memory() */
 	gfp_t gfp_mask;
 
-	gfp_mask = (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS) | gfp;
+	gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp;
 
 	/*
 	 * XXX: __getblk_slow() can not really deal with failure and

@@ -1283,8 +1283,8 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		int ret1;
 		struct address_space *mapping = inode->i_mapping;
 		struct page *page = find_or_create_page(mapping, 0,
-						mapping_gfp_mask(mapping) &
-						~__GFP_FS);
+						mapping_gfp_constraint(mapping,
+						~__GFP_FS));
 		if (!page) {
 			ret = VM_FAULT_OOM;
 			goto out;
@@ -1428,7 +1428,8 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
 		if (i_size_read(inode) == 0)
 			return;
 		page = find_or_create_page(mapping, 0,
-					   mapping_gfp_mask(mapping) & ~__GFP_FS);
+					   mapping_gfp_constraint(mapping,
+					   ~__GFP_FS));
 		if (!page)
 			return;
 		if (PageUptodate(page)) {

@@ -3380,7 +3380,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
 	struct page *page, *tpage;
 	unsigned int expected_index;
 	int rc;
-	gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping);
+	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
 
 	INIT_LIST_HEAD(tmplist);
 

@@ -3344,7 +3344,7 @@ static int __ext4_block_zero_page_range(handle_t *handle,
 	int err = 0;
 
 	page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
-				   mapping_gfp_mask(mapping) & ~__GFP_FS);
+				   mapping_gfp_constraint(mapping, ~__GFP_FS));
 	if (!page)
 		return -ENOMEM;
 

@@ -166,7 +166,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
 			page = list_entry(pages->prev, struct page, lru);
 			list_del(&page->lru);
 			if (add_to_page_cache_lru(page, mapping, page->index,
-					GFP_KERNEL & mapping_gfp_mask(mapping)))
+				  mapping_gfp_constraint(mapping, GFP_KERNEL)))
 				goto next_page;
 		}
 

@@ -57,7 +57,7 @@ static struct page *get_mapping_page(struct super_block *sb, pgoff_t index,
 	filler_t *filler = super->s_devops->readpage;
 	struct page *page;
 
-	BUG_ON(mapping_gfp_mask(mapping) & __GFP_FS);
+	BUG_ON(mapping_gfp_constraint(mapping, __GFP_FS));
 	if (use_filler)
 		page = read_cache_page(mapping, index, filler, sb);
 	else {

@@ -361,7 +361,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
 	sector_t last_block_in_bio = 0;
 	struct buffer_head map_bh;
 	unsigned long first_logical_block = 0;
-	gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping);
+	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
 
 	map_bh.b_state = 0;
 	map_bh.b_size = 0;
@@ -397,7 +397,7 @@ int mpage_readpage(struct page *page, get_block_t get_block)
 	sector_t last_block_in_bio = 0;
 	struct buffer_head map_bh;
 	unsigned long first_logical_block = 0;
-	gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(page->mapping);
+	gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
 
 	map_bh.b_state = 0;
 	map_bh.b_size = 0;

@@ -4604,7 +4604,7 @@ EXPORT_SYMBOL(__page_symlink);
 int page_symlink(struct inode *inode, const char *symname, int len)
 {
 	return __page_symlink(inode, symname, len,
-			!(mapping_gfp_mask(inode->i_mapping) & __GFP_FS));
+			!mapping_gfp_constraint(inode->i_mapping, __GFP_FS));
 }
 EXPORT_SYMBOL(page_symlink);
 

@@ -356,7 +356,7 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
 		goto failed;
 
 	mapping_set_gfp_mask(inode->i_mapping,
-			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
+			   mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
 
 	root = NILFS_I(dir)->i_root;
 	ii = NILFS_I(inode);
@@ -522,7 +522,7 @@ static int __nilfs_read_inode(struct super_block *sb,
 	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
 	nilfs_set_inode_flags(inode);
 	mapping_set_gfp_mask(inode->i_mapping,
-			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
+			   mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
 	return 0;
 
 failed_unmap:

@@ -525,8 +525,8 @@ static inline int __ntfs_grab_cache_pages(struct address_space *mapping,
 				}
 			}
 			err = add_to_page_cache_lru(*cached_page, mapping,
-					index,
-					GFP_KERNEL & mapping_gfp_mask(mapping));
+				   index,
+				   mapping_gfp_constraint(mapping, GFP_KERNEL));
 			if (unlikely(err)) {
 				if (err == -EEXIST)
 					continue;

@@ -360,7 +360,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
 				break;
 
 			error = add_to_page_cache_lru(page, mapping, index,
-					GFP_KERNEL & mapping_gfp_mask(mapping));
+				   mapping_gfp_constraint(mapping, GFP_KERNEL));
 			if (unlikely(error)) {
 				page_cache_release(page);
 				if (error == -EEXIST)

@@ -69,6 +69,13 @@ static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
 	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
 }
 
+/* Restricts the given gfp_mask to what the mapping allows. */
+static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
+		gfp_t gfp_mask)
+{
+	return mapping_gfp_mask(mapping) & gfp_mask;
+}
+
 /*
  * This is non-atomic.  Only to be used before the mapping is activated.
  * Probably needs a barrier...

@@ -1722,7 +1722,7 @@ no_cached_page:
 			goto out;
 		}
 		error = add_to_page_cache_lru(page, mapping, index,
-					GFP_KERNEL & mapping_gfp_mask(mapping));
+				mapping_gfp_constraint(mapping, GFP_KERNEL));
 		if (error) {
 			page_cache_release(page);
 			if (error == -EEXIST) {
@@ -1824,7 +1824,7 @@ static int page_cache_read(struct file *file, pgoff_t offset)
 			return -ENOMEM;
 
 		ret = add_to_page_cache_lru(page, mapping, offset,
-				GFP_KERNEL & mapping_gfp_mask(mapping));
+				mapping_gfp_constraint(mapping, GFP_KERNEL));
 		if (ret == 0)
 			ret = mapping->a_ops->readpage(file, page);
 		else if (ret == -EEXIST)

@@ -90,7 +90,7 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
 		page = list_to_page(pages);
 		list_del(&page->lru);
 		if (add_to_page_cache_lru(page, mapping, page->index,
-				GFP_KERNEL & mapping_gfp_mask(mapping))) {
+				mapping_gfp_constraint(mapping, GFP_KERNEL))) {
 			read_cache_pages_invalidate_page(mapping, page);
 			continue;
 		}
@@ -128,7 +128,7 @@ static int read_pages(struct address_space *mapping, struct file *filp,
 		struct page *page = list_to_page(pages);
 		list_del(&page->lru);
 		if (!add_to_page_cache_lru(page, mapping, page->index,
-				GFP_KERNEL & mapping_gfp_mask(mapping))) {
+				mapping_gfp_constraint(mapping, GFP_KERNEL))) {
 			mapping->a_ops->readpage(filp, page);
 		}
 		page_cache_release(page);