[PATCH] add vm_insert_pfn()
Add a vm_insert_pfn helper, so that ->fault handlers can have nopfn functionality by installing their own pte and returning NULL.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit e0dc0d8f4a (parent 2ca48ed5cc)
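To make the call pattern concrete, here is a minimal sketch of the fault path the commit message describes: the handler resolves a pfn for the faulting address, installs the PTE itself with vm_insert_pfn(), and returns NULL so the core fault code does nothing further. The mydev structure, mydev_fault() and the exact ->fault prototype are illustrative assumptions (the fault API was still being reworked when this patch went in); only the vm_insert_pfn() call itself comes from this patch.

/*
 * Illustrative sketch only: "mydev", mydev_fault() and this ->fault
 * prototype are assumptions, not part of the patch.  The point is the
 * call pattern: install the PTE with vm_insert_pfn(), then return NULL.
 */
struct mydev {
	unsigned long phys_base;	/* physical base of the device aperture */
};

static struct page *mydev_fault(struct vm_area_struct *vma,
				unsigned long address)
{
	struct mydev *dev = vma->vm_private_data;
	unsigned long offset = (address & PAGE_MASK) - vma->vm_start;
	unsigned long pfn = (dev->phys_base + offset) >> PAGE_SHIFT;

	/*
	 * -EBUSY means another thread raced us and the PTE is already
	 * present, which is fine; other errors (-ENOMEM) are ignored
	 * here for brevity.
	 */
	vm_insert_pfn(vma, address, pfn);

	/* No struct page to hand back: the PTE has been installed above. */
	return NULL;
}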
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1124,6 +1124,8 @@ unsigned long vmalloc_to_pfn(void *addr);
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
 			unsigned long pfn, unsigned long size, pgprot_t);
 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
+int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+			unsigned long pfn);
 
 struct page *follow_page(struct vm_area_struct *, unsigned long address,
 			unsigned int foll_flags);
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1277,6 +1277,51 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *
 }
 EXPORT_SYMBOL(vm_insert_page);
 
+/**
+ * vm_insert_pfn - insert single pfn into user vma
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pfn: source kernel pfn
+ *
+ * Similar to vm_insert_page, this allows drivers to insert individual pages
+ * they've allocated into a user vma. Same comments apply.
+ *
+ * This function should only be called from a vm_ops->fault handler, and
+ * in that case the handler should return NULL.
+ */
+int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+		unsigned long pfn)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	int retval;
+	pte_t *pte, entry;
+	spinlock_t *ptl;
+
+	BUG_ON(!(vma->vm_flags & VM_PFNMAP));
+	BUG_ON(is_cow_mapping(vma->vm_flags));
+
+	retval = -ENOMEM;
+	pte = get_locked_pte(mm, addr, &ptl);
+	if (!pte)
+		goto out;
+	retval = -EBUSY;
+	if (!pte_none(*pte))
+		goto out_unlock;
+
+	/* Ok, finally just insert the thing.. */
+	entry = pfn_pte(pfn, vma->vm_page_prot);
+	set_pte_at(mm, addr, pte, entry);
+	update_mmu_cache(vma, addr, entry);
+
+	retval = 0;
+out_unlock:
+	pte_unmap_unlock(pte, ptl);
+
+out:
+	return retval;
+}
+EXPORT_SYMBOL(vm_insert_pfn);
+
 /*
  * maps a range of physical memory into the requested pages. the old
  * mappings are removed. any references to nonexistent pages results