mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-09-04 20:19:47 +08:00

commit 2b74030354
Use new return type vm_fault_t for fault handlers.  For now, this is just
documenting that the function returns a VM_FAULT value rather than an
errno.  Once all instances are converted, vm_fault_t will become a
distinct type.

Ref: commit 1c8f422059 ("mm: change return type to vm_fault_t")

The aim is to change the return type of finish_fault() and
handle_mm_fault() to vm_fault_t.  As part of that clean-up, the return
types of all other recursively called functions have been changed to
vm_fault_t as well.

The places from which handle_mm_fault() is invoked will be changed to
vm_fault_t in a separate patch.

vmf_error() is the newly introduced inline function in 4.17-rc6.

[akpm@linux-foundation.org: don't shadow outer local `ret' in __do_huge_pmd_anonymous_page()]
Link: http://lkml.kernel.org/r/20180604171727.GA20279@jordon-HP-15-Notebook-PC
Signed-off-by: Souptick Joarder <jrdr.linux@gmail.com>
Reviewed-by: Matthew Wilcox <mawilcox@microsoft.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
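
For reference, the vm_fault_t plumbing this patch builds on looks roughly as
follows. This is an editor's sketch, not text from the kernel tree: the exact
typedef and the shape of vmf_error() are assumptions based on the commit
message and the 4.17-rc6 era headers it cites.

/*
 * Editor's sketch (assumed shapes, see commit message above):
 * vm_fault_t starts life as a plain typedef that merely documents intent;
 * it is meant to become a distinct __bitwise type once all users convert.
 */
typedef int vm_fault_t;

/* vmf_error(): map an errno from a fault path to a VM_FAULT_* code */
static inline vm_fault_t vmf_error(int err)
{
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}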
1879 lines · 51 KiB · C
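Before the listing itself, a minimal caller-side sketch (an editor's
illustration, not part of the file below) of the pin/use/dirty/release
contract described by the __get_user_pages() and get_user_pages() kerneldoc
in this file; the helper name demo_pin_user_buffer is hypothetical.

/*
 * Editor's illustration only -- not part of this file.
 * Pin a user buffer for writing, use it, then dirty and release it,
 * following the kerneldoc contract of get_user_pages() below.
 */
static long demo_pin_user_buffer(unsigned long start, unsigned long nr_pages,
				 struct page **pages)
{
	long i, pinned;

	down_read(&current->mm->mmap_sem);	/* gup requires mmap_sem held */
	pinned = get_user_pages(start, nr_pages, FOLL_WRITE, pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (pinned < 0)
		return pinned;			/* -errno: nothing was pinned */

	/* ... access pages[0..pinned) via kmap()/DMA here ... */

	for (i = 0; i < pinned; i++) {
		set_page_dirty_lock(pages[i]);	/* we wrote to the pages */
		put_page(pages[i]);		/* drop the gup reference */
	}
	return pinned;
}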
| #include <linux/kernel.h>
 | |
| #include <linux/errno.h>
 | |
| #include <linux/err.h>
 | |
| #include <linux/spinlock.h>
 | |
| 
 | |
| #include <linux/mm.h>
 | |
| #include <linux/memremap.h>
 | |
| #include <linux/pagemap.h>
 | |
| #include <linux/rmap.h>
 | |
| #include <linux/swap.h>
 | |
| #include <linux/swapops.h>
 | |
| 
 | |
| #include <linux/sched/signal.h>
 | |
| #include <linux/rwsem.h>
 | |
| #include <linux/hugetlb.h>
 | |
| 
 | |
| #include <asm/mmu_context.h>
 | |
| #include <asm/pgtable.h>
 | |
| #include <asm/tlbflush.h>
 | |
| 
 | |
| #include "internal.h"
 | |
| 
 | |
| static struct page *no_page_table(struct vm_area_struct *vma,
 | |
| 		unsigned int flags)
 | |
| {
 | |
| 	/*
 | |
| 	 * When core dumping an enormous anonymous area that nobody
 | |
| 	 * has touched so far, we don't want to allocate unnecessary pages or
 | |
| 	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
 | |
| 	 * then get_dump_page() will return NULL to leave a hole in the dump.
 | |
| 	 * But we can only make this optimization where a hole would surely
 | |
| 	 * be zero-filled if handle_mm_fault() actually did handle it.
 | |
| 	 */
 | |
| 	if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
 | |
| 		return ERR_PTR(-EFAULT);
 | |
| 	return NULL;
 | |
| }
 | |
| 
 | |
| static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
 | |
| 		pte_t *pte, unsigned int flags)
 | |
| {
 | |
| 	/* No page to get reference */
 | |
| 	if (flags & FOLL_GET)
 | |
| 		return -EFAULT;
 | |
| 
 | |
| 	if (flags & FOLL_TOUCH) {
 | |
| 		pte_t entry = *pte;
 | |
| 
 | |
| 		if (flags & FOLL_WRITE)
 | |
| 			entry = pte_mkdirty(entry);
 | |
| 		entry = pte_mkyoung(entry);
 | |
| 
 | |
| 		if (!pte_same(*pte, entry)) {
 | |
| 			set_pte_at(vma->vm_mm, address, pte, entry);
 | |
| 			update_mmu_cache(vma, address, pte);
 | |
| 		}
 | |
| 	}
 | |
| 
 | |
| 	/* Proper page table entry exists, but no corresponding struct page */
 | |
| 	return -EEXIST;
 | |
| }
 | |
| 
 | |
| /*
 | |
|  * FOLL_FORCE can write to even unwritable pte's, but only
 | |
|  * after we've gone through a COW cycle and they are dirty.
 | |
|  */
 | |
| static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
 | |
| {
 | |
| 	return pte_write(pte) ||
 | |
| 		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
 | |
| }
 | |
| 
 | |
| static struct page *follow_page_pte(struct vm_area_struct *vma,
 | |
| 		unsigned long address, pmd_t *pmd, unsigned int flags)
 | |
| {
 | |
| 	struct mm_struct *mm = vma->vm_mm;
 | |
| 	struct dev_pagemap *pgmap = NULL;
 | |
| 	struct page *page;
 | |
| 	spinlock_t *ptl;
 | |
| 	pte_t *ptep, pte;
 | |
| 
 | |
| retry:
 | |
| 	if (unlikely(pmd_bad(*pmd)))
 | |
| 		return no_page_table(vma, flags);
 | |
| 
 | |
| 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
 | |
| 	pte = *ptep;
 | |
| 	if (!pte_present(pte)) {
 | |
| 		swp_entry_t entry;
 | |
| 		/*
 | |
| 		 * KSM's break_ksm() relies upon recognizing a ksm page
 | |
| 		 * even while it is being migrated, so for that case we
 | |
| 		 * need migration_entry_wait().
 | |
| 		 */
 | |
| 		if (likely(!(flags & FOLL_MIGRATION)))
 | |
| 			goto no_page;
 | |
| 		if (pte_none(pte))
 | |
| 			goto no_page;
 | |
| 		entry = pte_to_swp_entry(pte);
 | |
| 		if (!is_migration_entry(entry))
 | |
| 			goto no_page;
 | |
| 		pte_unmap_unlock(ptep, ptl);
 | |
| 		migration_entry_wait(mm, pmd, address);
 | |
| 		goto retry;
 | |
| 	}
 | |
| 	if ((flags & FOLL_NUMA) && pte_protnone(pte))
 | |
| 		goto no_page;
 | |
| 	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
 | |
| 		pte_unmap_unlock(ptep, ptl);
 | |
| 		return NULL;
 | |
| 	}
 | |
| 
 | |
| 	page = vm_normal_page(vma, address, pte);
 | |
| 	if (!page && pte_devmap(pte) && (flags & FOLL_GET)) {
 | |
| 		/*
 | |
| 		 * Only return device mapping pages in the FOLL_GET case since
 | |
| 		 * they are only valid while holding the pgmap reference.
 | |
| 		 */
 | |
| 		pgmap = get_dev_pagemap(pte_pfn(pte), NULL);
 | |
| 		if (pgmap)
 | |
| 			page = pte_page(pte);
 | |
| 		else
 | |
| 			goto no_page;
 | |
| 	} else if (unlikely(!page)) {
 | |
| 		if (flags & FOLL_DUMP) {
 | |
| 			/* Avoid special (like zero) pages in core dumps */
 | |
| 			page = ERR_PTR(-EFAULT);
 | |
| 			goto out;
 | |
| 		}
 | |
| 
 | |
| 		if (is_zero_pfn(pte_pfn(pte))) {
 | |
| 			page = pte_page(pte);
 | |
| 		} else {
 | |
| 			int ret;
 | |
| 
 | |
| 			ret = follow_pfn_pte(vma, address, ptep, flags);
 | |
| 			page = ERR_PTR(ret);
 | |
| 			goto out;
 | |
| 		}
 | |
| 	}
 | |
| 
 | |
| 	if (flags & FOLL_SPLIT && PageTransCompound(page)) {
 | |
| 		int ret;
 | |
| 		get_page(page);
 | |
| 		pte_unmap_unlock(ptep, ptl);
 | |
| 		lock_page(page);
 | |
| 		ret = split_huge_page(page);
 | |
| 		unlock_page(page);
 | |
| 		put_page(page);
 | |
| 		if (ret)
 | |
| 			return ERR_PTR(ret);
 | |
| 		goto retry;
 | |
| 	}
 | |
| 
 | |
| 	if (flags & FOLL_GET) {
 | |
| 		get_page(page);
 | |
| 
 | |
| 		/* drop the pgmap reference now that we hold the page */
 | |
| 		if (pgmap) {
 | |
| 			put_dev_pagemap(pgmap);
 | |
| 			pgmap = NULL;
 | |
| 		}
 | |
| 	}
 | |
| 	if (flags & FOLL_TOUCH) {
 | |
| 		if ((flags & FOLL_WRITE) &&
 | |
| 		    !pte_dirty(pte) && !PageDirty(page))
 | |
| 			set_page_dirty(page);
 | |
| 		/*
 | |
| 		 * pte_mkyoung() would be more correct here, but atomic care
 | |
| 		 * is needed to avoid losing the dirty bit: it is easier to use
 | |
| 		 * mark_page_accessed().
 | |
| 		 */
 | |
| 		mark_page_accessed(page);
 | |
| 	}
 | |
| 	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
 | |
| 		/* Do not mlock pte-mapped THP */
 | |
| 		if (PageTransCompound(page))
 | |
| 			goto out;
 | |
| 
 | |
| 		/*
 | |
| 		 * The preliminary mapping check is mainly to avoid the
 | |
| 		 * pointless overhead of lock_page on the ZERO_PAGE
 | |
| 		 * which might bounce very badly if there is contention.
 | |
| 		 *
 | |
| 		 * If the page is already locked, we don't need to
 | |
| 		 * handle it now - vmscan will handle it later if and
 | |
| 		 * when it attempts to reclaim the page.
 | |
| 		 */
 | |
| 		if (page->mapping && trylock_page(page)) {
 | |
| 			lru_add_drain();  /* push cached pages to LRU */
 | |
| 			/*
 | |
| 			 * Because we lock page here, and migration is
 | |
| 			 * blocked by the pte's page reference, and we
 | |
| 			 * know the page is still mapped, we don't even
 | |
| 			 * need to check for file-cache page truncation.
 | |
| 			 */
 | |
| 			mlock_vma_page(page);
 | |
| 			unlock_page(page);
 | |
| 		}
 | |
| 	}
 | |
| out:
 | |
| 	pte_unmap_unlock(ptep, ptl);
 | |
| 	return page;
 | |
| no_page:
 | |
| 	pte_unmap_unlock(ptep, ptl);
 | |
| 	if (!pte_none(pte))
 | |
| 		return NULL;
 | |
| 	return no_page_table(vma, flags);
 | |
| }
 | |
| 
 | |
| static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 | |
| 				    unsigned long address, pud_t *pudp,
 | |
| 				    unsigned int flags, unsigned int *page_mask)
 | |
| {
 | |
| 	pmd_t *pmd, pmdval;
 | |
| 	spinlock_t *ptl;
 | |
| 	struct page *page;
 | |
| 	struct mm_struct *mm = vma->vm_mm;
 | |
| 
 | |
| 	pmd = pmd_offset(pudp, address);
 | |
| 	/*
 | |
| 	 * The READ_ONCE() will stabilize the pmdval in a register or
 | |
| 	 * on the stack so that it will stop changing under the code.
 | |
| 	 */
 | |
| 	pmdval = READ_ONCE(*pmd);
 | |
| 	if (pmd_none(pmdval))
 | |
| 		return no_page_table(vma, flags);
 | |
| 	if (pmd_huge(pmdval) && vma->vm_flags & VM_HUGETLB) {
 | |
| 		page = follow_huge_pmd(mm, address, pmd, flags);
 | |
| 		if (page)
 | |
| 			return page;
 | |
| 		return no_page_table(vma, flags);
 | |
| 	}
 | |
| 	if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
 | |
| 		page = follow_huge_pd(vma, address,
 | |
| 				      __hugepd(pmd_val(pmdval)), flags,
 | |
| 				      PMD_SHIFT);
 | |
| 		if (page)
 | |
| 			return page;
 | |
| 		return no_page_table(vma, flags);
 | |
| 	}
 | |
| retry:
 | |
| 	if (!pmd_present(pmdval)) {
 | |
| 		if (likely(!(flags & FOLL_MIGRATION)))
 | |
| 			return no_page_table(vma, flags);
 | |
| 		VM_BUG_ON(thp_migration_supported() &&
 | |
| 				  !is_pmd_migration_entry(pmdval));
 | |
| 		if (is_pmd_migration_entry(pmdval))
 | |
| 			pmd_migration_entry_wait(mm, pmd);
 | |
| 		pmdval = READ_ONCE(*pmd);
 | |
| 		/*
 | |
| 		 * MADV_DONTNEED may convert the pmd to null because
 | |
| 		 * mmap_sem is held in read mode
 | |
| 		 */
 | |
| 		if (pmd_none(pmdval))
 | |
| 			return no_page_table(vma, flags);
 | |
| 		goto retry;
 | |
| 	}
 | |
| 	if (pmd_devmap(pmdval)) {
 | |
| 		ptl = pmd_lock(mm, pmd);
 | |
| 		page = follow_devmap_pmd(vma, address, pmd, flags);
 | |
| 		spin_unlock(ptl);
 | |
| 		if (page)
 | |
| 			return page;
 | |
| 	}
 | |
| 	if (likely(!pmd_trans_huge(pmdval)))
 | |
| 		return follow_page_pte(vma, address, pmd, flags);
 | |
| 
 | |
| 	if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
 | |
| 		return no_page_table(vma, flags);
 | |
| 
 | |
| retry_locked:
 | |
| 	ptl = pmd_lock(mm, pmd);
 | |
| 	if (unlikely(pmd_none(*pmd))) {
 | |
| 		spin_unlock(ptl);
 | |
| 		return no_page_table(vma, flags);
 | |
| 	}
 | |
| 	if (unlikely(!pmd_present(*pmd))) {
 | |
| 		spin_unlock(ptl);
 | |
| 		if (likely(!(flags & FOLL_MIGRATION)))
 | |
| 			return no_page_table(vma, flags);
 | |
| 		pmd_migration_entry_wait(mm, pmd);
 | |
| 		goto retry_locked;
 | |
| 	}
 | |
| 	if (unlikely(!pmd_trans_huge(*pmd))) {
 | |
| 		spin_unlock(ptl);
 | |
| 		return follow_page_pte(vma, address, pmd, flags);
 | |
| 	}
 | |
| 	if (flags & FOLL_SPLIT) {
 | |
| 		int ret;
 | |
| 		page = pmd_page(*pmd);
 | |
| 		if (is_huge_zero_page(page)) {
 | |
| 			spin_unlock(ptl);
 | |
| 			ret = 0;
 | |
| 			split_huge_pmd(vma, pmd, address);
 | |
| 			if (pmd_trans_unstable(pmd))
 | |
| 				ret = -EBUSY;
 | |
| 		} else {
 | |
| 			get_page(page);
 | |
| 			spin_unlock(ptl);
 | |
| 			lock_page(page);
 | |
| 			ret = split_huge_page(page);
 | |
| 			unlock_page(page);
 | |
| 			put_page(page);
 | |
| 			if (pmd_none(*pmd))
 | |
| 				return no_page_table(vma, flags);
 | |
| 		}
 | |
| 
 | |
| 		return ret ? ERR_PTR(ret) :
 | |
| 			follow_page_pte(vma, address, pmd, flags);
 | |
| 	}
 | |
| 	page = follow_trans_huge_pmd(vma, address, pmd, flags);
 | |
| 	spin_unlock(ptl);
 | |
| 	*page_mask = HPAGE_PMD_NR - 1;
 | |
| 	return page;
 | |
| }
 | |
| 
 | |
| 
 | |
| static struct page *follow_pud_mask(struct vm_area_struct *vma,
 | |
| 				    unsigned long address, p4d_t *p4dp,
 | |
| 				    unsigned int flags, unsigned int *page_mask)
 | |
| {
 | |
| 	pud_t *pud;
 | |
| 	spinlock_t *ptl;
 | |
| 	struct page *page;
 | |
| 	struct mm_struct *mm = vma->vm_mm;
 | |
| 
 | |
| 	pud = pud_offset(p4dp, address);
 | |
| 	if (pud_none(*pud))
 | |
| 		return no_page_table(vma, flags);
 | |
| 	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
 | |
| 		page = follow_huge_pud(mm, address, pud, flags);
 | |
| 		if (page)
 | |
| 			return page;
 | |
| 		return no_page_table(vma, flags);
 | |
| 	}
 | |
| 	if (is_hugepd(__hugepd(pud_val(*pud)))) {
 | |
| 		page = follow_huge_pd(vma, address,
 | |
| 				      __hugepd(pud_val(*pud)), flags,
 | |
| 				      PUD_SHIFT);
 | |
| 		if (page)
 | |
| 			return page;
 | |
| 		return no_page_table(vma, flags);
 | |
| 	}
 | |
| 	if (pud_devmap(*pud)) {
 | |
| 		ptl = pud_lock(mm, pud);
 | |
| 		page = follow_devmap_pud(vma, address, pud, flags);
 | |
| 		spin_unlock(ptl);
 | |
| 		if (page)
 | |
| 			return page;
 | |
| 	}
 | |
| 	if (unlikely(pud_bad(*pud)))
 | |
| 		return no_page_table(vma, flags);
 | |
| 
 | |
| 	return follow_pmd_mask(vma, address, pud, flags, page_mask);
 | |
| }
 | |
| 
 | |
| 
 | |
| static struct page *follow_p4d_mask(struct vm_area_struct *vma,
 | |
| 				    unsigned long address, pgd_t *pgdp,
 | |
| 				    unsigned int flags, unsigned int *page_mask)
 | |
| {
 | |
| 	p4d_t *p4d;
 | |
| 	struct page *page;
 | |
| 
 | |
| 	p4d = p4d_offset(pgdp, address);
 | |
| 	if (p4d_none(*p4d))
 | |
| 		return no_page_table(vma, flags);
 | |
| 	BUILD_BUG_ON(p4d_huge(*p4d));
 | |
| 	if (unlikely(p4d_bad(*p4d)))
 | |
| 		return no_page_table(vma, flags);
 | |
| 
 | |
| 	if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
 | |
| 		page = follow_huge_pd(vma, address,
 | |
| 				      __hugepd(p4d_val(*p4d)), flags,
 | |
| 				      P4D_SHIFT);
 | |
| 		if (page)
 | |
| 			return page;
 | |
| 		return no_page_table(vma, flags);
 | |
| 	}
 | |
| 	return follow_pud_mask(vma, address, p4d, flags, page_mask);
 | |
| }
 | |
| 
 | |
| /**
 | |
|  * follow_page_mask - look up a page descriptor from a user-virtual address
 | |
|  * @vma: vm_area_struct mapping @address
 | |
|  * @address: virtual address to look up
 | |
|  * @flags: flags modifying lookup behaviour
 | |
|  * @page_mask: on output, *page_mask is set according to the size of the page
 | |
|  *
 | |
|  * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 | |
|  *
 | |
|  * Returns the mapped (struct page *), %NULL if no mapping exists, or
 | |
|  * an error pointer if there is a mapping to something not represented
 | |
|  * by a page descriptor (see also vm_normal_page()).
 | |
|  */
 | |
| struct page *follow_page_mask(struct vm_area_struct *vma,
 | |
| 			      unsigned long address, unsigned int flags,
 | |
| 			      unsigned int *page_mask)
 | |
| {
 | |
| 	pgd_t *pgd;
 | |
| 	struct page *page;
 | |
| 	struct mm_struct *mm = vma->vm_mm;
 | |
| 
 | |
| 	*page_mask = 0;
 | |
| 
 | |
| 	/* make this handle hugepd */
 | |
| 	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
 | |
| 	if (!IS_ERR(page)) {
 | |
| 		BUG_ON(flags & FOLL_GET);
 | |
| 		return page;
 | |
| 	}
 | |
| 
 | |
| 	pgd = pgd_offset(mm, address);
 | |
| 
 | |
| 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
 | |
| 		return no_page_table(vma, flags);
 | |
| 
 | |
| 	if (pgd_huge(*pgd)) {
 | |
| 		page = follow_huge_pgd(mm, address, pgd, flags);
 | |
| 		if (page)
 | |
| 			return page;
 | |
| 		return no_page_table(vma, flags);
 | |
| 	}
 | |
| 	if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
 | |
| 		page = follow_huge_pd(vma, address,
 | |
| 				      __hugepd(pgd_val(*pgd)), flags,
 | |
| 				      PGDIR_SHIFT);
 | |
| 		if (page)
 | |
| 			return page;
 | |
| 		return no_page_table(vma, flags);
 | |
| 	}
 | |
| 
 | |
| 	return follow_p4d_mask(vma, address, pgd, flags, page_mask);
 | |
| }
 | |
| 
 | |
| static int get_gate_page(struct mm_struct *mm, unsigned long address,
 | |
| 		unsigned int gup_flags, struct vm_area_struct **vma,
 | |
| 		struct page **page)
 | |
| {
 | |
| 	pgd_t *pgd;
 | |
| 	p4d_t *p4d;
 | |
| 	pud_t *pud;
 | |
| 	pmd_t *pmd;
 | |
| 	pte_t *pte;
 | |
| 	int ret = -EFAULT;
 | |
| 
 | |
| 	/* user gate pages are read-only */
 | |
| 	if (gup_flags & FOLL_WRITE)
 | |
| 		return -EFAULT;
 | |
| 	if (address > TASK_SIZE)
 | |
| 		pgd = pgd_offset_k(address);
 | |
| 	else
 | |
| 		pgd = pgd_offset_gate(mm, address);
 | |
| 	BUG_ON(pgd_none(*pgd));
 | |
| 	p4d = p4d_offset(pgd, address);
 | |
| 	BUG_ON(p4d_none(*p4d));
 | |
| 	pud = pud_offset(p4d, address);
 | |
| 	BUG_ON(pud_none(*pud));
 | |
| 	pmd = pmd_offset(pud, address);
 | |
| 	if (!pmd_present(*pmd))
 | |
| 		return -EFAULT;
 | |
| 	VM_BUG_ON(pmd_trans_huge(*pmd));
 | |
| 	pte = pte_offset_map(pmd, address);
 | |
| 	if (pte_none(*pte))
 | |
| 		goto unmap;
 | |
| 	*vma = get_gate_vma(mm);
 | |
| 	if (!page)
 | |
| 		goto out;
 | |
| 	*page = vm_normal_page(*vma, address, *pte);
 | |
| 	if (!*page) {
 | |
| 		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
 | |
| 			goto unmap;
 | |
| 		*page = pte_page(*pte);
 | |
| 
 | |
| 		/*
 | |
| 		 * This should never happen (a device public page in the gate
 | |
| 		 * area).
 | |
| 		 */
 | |
| 		if (is_device_public_page(*page))
 | |
| 			goto unmap;
 | |
| 	}
 | |
| 	get_page(*page);
 | |
| out:
 | |
| 	ret = 0;
 | |
| unmap:
 | |
| 	pte_unmap(pte);
 | |
| 	return ret;
 | |
| }
 | |
| 
 | |
| /*
 | |
|  * mmap_sem must be held on entry.  If @nonblocking != NULL and
 | |
|  * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
 | |
|  * If it is, *@nonblocking will be set to 0 and -EBUSY returned.
 | |
|  */
 | |
| static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 | |
| 		unsigned long address, unsigned int *flags, int *nonblocking)
 | |
| {
 | |
| 	unsigned int fault_flags = 0;
 | |
| 	vm_fault_t ret;
 | |
| 
 | |
| 	/* mlock all present pages, but do not fault in new pages */
 | |
| 	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
 | |
| 		return -ENOENT;
 | |
| 	if (*flags & FOLL_WRITE)
 | |
| 		fault_flags |= FAULT_FLAG_WRITE;
 | |
| 	if (*flags & FOLL_REMOTE)
 | |
| 		fault_flags |= FAULT_FLAG_REMOTE;
 | |
| 	if (nonblocking)
 | |
| 		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
 | |
| 	if (*flags & FOLL_NOWAIT)
 | |
| 		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
 | |
| 	if (*flags & FOLL_TRIED) {
 | |
| 		VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
 | |
| 		fault_flags |= FAULT_FLAG_TRIED;
 | |
| 	}
 | |
| 
 | |
| 	ret = handle_mm_fault(vma, address, fault_flags);
 | |
| 	if (ret & VM_FAULT_ERROR) {
 | |
| 		int err = vm_fault_to_errno(ret, *flags);
 | |
| 
 | |
| 		if (err)
 | |
| 			return err;
 | |
| 		BUG();
 | |
| 	}
 | |
| 
 | |
| 	if (tsk) {
 | |
| 		if (ret & VM_FAULT_MAJOR)
 | |
| 			tsk->maj_flt++;
 | |
| 		else
 | |
| 			tsk->min_flt++;
 | |
| 	}
 | |
| 
 | |
| 	if (ret & VM_FAULT_RETRY) {
 | |
| 		if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
 | |
| 			*nonblocking = 0;
 | |
| 		return -EBUSY;
 | |
| 	}
 | |
| 
 | |
| 	/*
 | |
| 	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
 | |
| 	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
 | |
| 	 * can thus safely do subsequent page lookups as if they were reads.
 | |
| 	 * But only do so when looping for pte_write is futile: in some cases
 | |
| 	 * userspace may also be wanting to write to the gotten user page,
 | |
| 	 * which a read fault here might prevent (a readonly page might get
 | |
| 	 * reCOWed by userspace write).
 | |
| 	 */
 | |
| 	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
 | |
| 		*flags |= FOLL_COW;
 | |
| 	return 0;
 | |
| }
 | |
| 
 | |
| static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
 | |
| {
 | |
| 	vm_flags_t vm_flags = vma->vm_flags;
 | |
| 	int write = (gup_flags & FOLL_WRITE);
 | |
| 	int foreign = (gup_flags & FOLL_REMOTE);
 | |
| 
 | |
| 	if (vm_flags & (VM_IO | VM_PFNMAP))
 | |
| 		return -EFAULT;
 | |
| 
 | |
| 	if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
 | |
| 		return -EFAULT;
 | |
| 
 | |
| 	if (write) {
 | |
| 		if (!(vm_flags & VM_WRITE)) {
 | |
| 			if (!(gup_flags & FOLL_FORCE))
 | |
| 				return -EFAULT;
 | |
| 			/*
 | |
| 			 * We used to let the write,force case do COW in a
 | |
| 			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
 | |
| 			 * set a breakpoint in a read-only mapping of an
 | |
| 			 * executable, without corrupting the file (yet only
 | |
| 			 * when that file had been opened for writing!).
 | |
| 			 * Anon pages in shared mappings are surprising: now
 | |
| 			 * just reject it.
 | |
| 			 */
 | |
| 			if (!is_cow_mapping(vm_flags))
 | |
| 				return -EFAULT;
 | |
| 		}
 | |
| 	} else if (!(vm_flags & VM_READ)) {
 | |
| 		if (!(gup_flags & FOLL_FORCE))
 | |
| 			return -EFAULT;
 | |
| 		/*
 | |
| 		 * Is there actually any vma we can reach here which does not
 | |
| 		 * have VM_MAYREAD set?
 | |
| 		 */
 | |
| 		if (!(vm_flags & VM_MAYREAD))
 | |
| 			return -EFAULT;
 | |
| 	}
 | |
| 	/*
 | |
| 	 * gups are always data accesses, not instruction
 | |
| 	 * fetches, so execute=false here
 | |
| 	 */
 | |
| 	if (!arch_vma_access_permitted(vma, write, false, foreign))
 | |
| 		return -EFAULT;
 | |
| 	return 0;
 | |
| }
 | |
| 
 | |
| /**
 | |
|  * __get_user_pages() - pin user pages in memory
 | |
|  * @tsk:	task_struct of target task
 | |
|  * @mm:		mm_struct of target mm
 | |
|  * @start:	starting user address
 | |
|  * @nr_pages:	number of pages from start to pin
 | |
|  * @gup_flags:	flags modifying pin behaviour
 | |
|  * @pages:	array that receives pointers to the pages pinned.
 | |
|  *		Should be at least nr_pages long. Or NULL, if caller
 | |
|  *		only intends to ensure the pages are faulted in.
 | |
|  * @vmas:	array of pointers to vmas corresponding to each page.
 | |
|  *		Or NULL if the caller does not require them.
 | |
|  * @nonblocking: whether waiting for disk IO or mmap_sem contention
 | |
|  *
 | |
|  * Returns number of pages pinned. This may be fewer than the number
 | |
|  * requested. If nr_pages is 0 or negative, returns 0. If no pages
 | |
|  * were pinned, returns -errno. Each page returned must be released
 | |
|  * with a put_page() call when it is finished with. vmas will only
 | |
|  * remain valid while mmap_sem is held.
 | |
|  *
 | |
|  * Must be called with mmap_sem held.  It may be released.  See below.
 | |
|  *
 | |
|  * __get_user_pages walks a process's page tables and takes a reference to
 | |
|  * each struct page that each user address corresponds to at a given
 | |
|  * instant. That is, it takes the page that would be accessed if a user
 | |
|  * thread accesses the given user virtual address at that instant.
 | |
|  *
 | |
|  * This does not guarantee that the page exists in the user mappings when
 | |
|  * __get_user_pages returns, and there may even be a completely different
 | |
|  * page there in some cases (eg. if mmapped pagecache has been invalidated
 | |
|  * and subsequently re faulted). However it does guarantee that the page
 | |
|  * won't be freed completely. And mostly callers simply care that the page
 | |
|  * contains data that was valid *at some point in time*. Typically, an IO
 | |
|  * or similar operation cannot guarantee anything stronger anyway because
 | |
|  * locks can't be held over the syscall boundary.
 | |
|  *
 | |
|  * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 | |
|  * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 | |
|  * appropriate) must be called after the page is finished with, and
 | |
|  * before put_page is called.
 | |
|  *
 | |
|  * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 | |
|  * or mmap_sem contention, and if waiting is needed to pin all pages,
 | |
|  * *@nonblocking will be set to 0.  Further, if @gup_flags does not
 | |
|  * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
 | |
|  * this case.
 | |
|  *
 | |
|  * A caller using such a combination of @nonblocking and @gup_flags
 | |
|  * must therefore hold the mmap_sem for reading only, and recognize
 | |
|  * when it's been released.  Otherwise, it must be held for either
 | |
|  * reading or writing and will not be released.
 | |
|  *
 | |
|  * In most cases, get_user_pages or get_user_pages_fast should be used
 | |
|  * instead of __get_user_pages. __get_user_pages should be used only if
 | |
|  * you need some special @gup_flags.
 | |
|  */
 | |
| static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 | |
| 		unsigned long start, unsigned long nr_pages,
 | |
| 		unsigned int gup_flags, struct page **pages,
 | |
| 		struct vm_area_struct **vmas, int *nonblocking)
 | |
| {
 | |
| 	long i = 0;
 | |
| 	unsigned int page_mask;
 | |
| 	struct vm_area_struct *vma = NULL;
 | |
| 
 | |
| 	if (!nr_pages)
 | |
| 		return 0;
 | |
| 
 | |
| 	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
 | |
| 
 | |
| 	/*
 | |
| 	 * If FOLL_FORCE is set then do not force a full fault as the hinting
 | |
| 	 * fault information is unrelated to the reference behaviour of a task
 | |
| 	 * using the address space
 | |
| 	 */
 | |
| 	if (!(gup_flags & FOLL_FORCE))
 | |
| 		gup_flags |= FOLL_NUMA;
 | |
| 
 | |
| 	do {
 | |
| 		struct page *page;
 | |
| 		unsigned int foll_flags = gup_flags;
 | |
| 		unsigned int page_increm;
 | |
| 
 | |
| 		/* first iteration or cross vma bound */
 | |
| 		if (!vma || start >= vma->vm_end) {
 | |
| 			vma = find_extend_vma(mm, start);
 | |
| 			if (!vma && in_gate_area(mm, start)) {
 | |
| 				int ret;
 | |
| 				ret = get_gate_page(mm, start & PAGE_MASK,
 | |
| 						gup_flags, &vma,
 | |
| 						pages ? &pages[i] : NULL);
 | |
| 				if (ret)
 | |
| 					return i ? : ret;
 | |
| 				page_mask = 0;
 | |
| 				goto next_page;
 | |
| 			}
 | |
| 
 | |
| 			if (!vma || check_vma_flags(vma, gup_flags))
 | |
| 				return i ? : -EFAULT;
 | |
| 			if (is_vm_hugetlb_page(vma)) {
 | |
| 				i = follow_hugetlb_page(mm, vma, pages, vmas,
 | |
| 						&start, &nr_pages, i,
 | |
| 						gup_flags, nonblocking);
 | |
| 				continue;
 | |
| 			}
 | |
| 		}
 | |
| retry:
 | |
| 		/*
 | |
| 		 * If we have a pending SIGKILL, don't keep faulting pages and
 | |
| 		 * potentially allocating memory.
 | |
| 		 */
 | |
| 		if (unlikely(fatal_signal_pending(current)))
 | |
| 			return i ? i : -ERESTARTSYS;
 | |
| 		cond_resched();
 | |
| 		page = follow_page_mask(vma, start, foll_flags, &page_mask);
 | |
| 		if (!page) {
 | |
| 			int ret;
 | |
| 			ret = faultin_page(tsk, vma, start, &foll_flags,
 | |
| 					nonblocking);
 | |
| 			switch (ret) {
 | |
| 			case 0:
 | |
| 				goto retry;
 | |
| 			case -EFAULT:
 | |
| 			case -ENOMEM:
 | |
| 			case -EHWPOISON:
 | |
| 				return i ? i : ret;
 | |
| 			case -EBUSY:
 | |
| 				return i;
 | |
| 			case -ENOENT:
 | |
| 				goto next_page;
 | |
| 			}
 | |
| 			BUG();
 | |
| 		} else if (PTR_ERR(page) == -EEXIST) {
 | |
| 			/*
 | |
| 			 * Proper page table entry exists, but no corresponding
 | |
| 			 * struct page.
 | |
| 			 */
 | |
| 			goto next_page;
 | |
| 		} else if (IS_ERR(page)) {
 | |
| 			return i ? i : PTR_ERR(page);
 | |
| 		}
 | |
| 		if (pages) {
 | |
| 			pages[i] = page;
 | |
| 			flush_anon_page(vma, page, start);
 | |
| 			flush_dcache_page(page);
 | |
| 			page_mask = 0;
 | |
| 		}
 | |
| next_page:
 | |
| 		if (vmas) {
 | |
| 			vmas[i] = vma;
 | |
| 			page_mask = 0;
 | |
| 		}
 | |
| 		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
 | |
| 		if (page_increm > nr_pages)
 | |
| 			page_increm = nr_pages;
 | |
| 		i += page_increm;
 | |
| 		start += page_increm * PAGE_SIZE;
 | |
| 		nr_pages -= page_increm;
 | |
| 	} while (nr_pages);
 | |
| 	return i;
 | |
| }
 | |
| 
 | |
| static bool vma_permits_fault(struct vm_area_struct *vma,
 | |
| 			      unsigned int fault_flags)
 | |
| {
 | |
| 	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
 | |
| 	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
 | |
| 	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;
 | |
| 
 | |
| 	if (!(vm_flags & vma->vm_flags))
 | |
| 		return false;
 | |
| 
 | |
| 	/*
 | |
| 	 * The architecture might have a hardware protection
 | |
| 	 * mechanism other than read/write that can deny access.
 | |
| 	 *
 | |
| 	 * gup always represents data access, not instruction
 | |
| 	 * fetches, so execute=false here:
 | |
| 	 */
 | |
| 	if (!arch_vma_access_permitted(vma, write, false, foreign))
 | |
| 		return false;
 | |
| 
 | |
| 	return true;
 | |
| }
 | |
| 
 | |
| /*
 | |
|  * fixup_user_fault() - manually resolve a user page fault
 | |
|  * @tsk:	the task_struct to use for page fault accounting, or
 | |
|  *		NULL if faults are not to be recorded.
 | |
|  * @mm:		mm_struct of target mm
 | |
|  * @address:	user address
 | |
|  * @fault_flags:flags to pass down to handle_mm_fault()
 | |
|  * @unlocked:	did we unlock the mmap_sem while retrying, maybe NULL if caller
 | |
|  *		does not allow retry
 | |
|  *
 | |
|  * This is meant to be called in the specific scenario where for locking reasons
 | |
|  * we try to access user memory in atomic context (within a pagefault_disable()
 | |
|  * section), this returns -EFAULT, and we want to resolve the user fault before
 | |
|  * trying again.
 | |
|  *
 | |
|  * Typically this is meant to be used by the futex code.
 | |
|  *
 | |
|  * The main difference with get_user_pages() is that this function will
 | |
|  * unconditionally call handle_mm_fault() which will in turn perform all the
 | |
|  * necessary SW fixup of the dirty and young bits in the PTE, while
 | |
|  * get_user_pages() only guarantees to update these in the struct page.
 | |
|  *
 | |
|  * This is important for some architectures where those bits also gate the
 | |
|  * access permission to the page because they are maintained in software.  On
 | |
|  * such architectures, gup() will not be enough to make a subsequent access
 | |
|  * succeed.
 | |
|  *
 | |
|  * This function will not return with an unlocked mmap_sem. So it has not the
 | |
|  * same semantics wrt the @mm->mmap_sem as does filemap_fault().
 | |
|  */
 | |
| int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 | |
| 		     unsigned long address, unsigned int fault_flags,
 | |
| 		     bool *unlocked)
 | |
| {
 | |
| 	struct vm_area_struct *vma;
 | |
| 	vm_fault_t ret, major = 0;
 | |
| 
 | |
| 	if (unlocked)
 | |
| 		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
 | |
| 
 | |
| retry:
 | |
| 	vma = find_extend_vma(mm, address);
 | |
| 	if (!vma || address < vma->vm_start)
 | |
| 		return -EFAULT;
 | |
| 
 | |
| 	if (!vma_permits_fault(vma, fault_flags))
 | |
| 		return -EFAULT;
 | |
| 
 | |
| 	ret = handle_mm_fault(vma, address, fault_flags);
 | |
| 	major |= ret & VM_FAULT_MAJOR;
 | |
| 	if (ret & VM_FAULT_ERROR) {
 | |
| 		int err = vm_fault_to_errno(ret, 0);
 | |
| 
 | |
| 		if (err)
 | |
| 			return err;
 | |
| 		BUG();
 | |
| 	}
 | |
| 
 | |
| 	if (ret & VM_FAULT_RETRY) {
 | |
| 		down_read(&mm->mmap_sem);
 | |
| 		if (!(fault_flags & FAULT_FLAG_TRIED)) {
 | |
| 			*unlocked = true;
 | |
| 			fault_flags &= ~FAULT_FLAG_ALLOW_RETRY;
 | |
| 			fault_flags |= FAULT_FLAG_TRIED;
 | |
| 			goto retry;
 | |
| 		}
 | |
| 	}
 | |
| 
 | |
| 	if (tsk) {
 | |
| 		if (major)
 | |
| 			tsk->maj_flt++;
 | |
| 		else
 | |
| 			tsk->min_flt++;
 | |
| 	}
 | |
| 	return 0;
 | |
| }
 | |
| EXPORT_SYMBOL_GPL(fixup_user_fault);
 | |
| 
 | |
| static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 | |
| 						struct mm_struct *mm,
 | |
| 						unsigned long start,
 | |
| 						unsigned long nr_pages,
 | |
| 						struct page **pages,
 | |
| 						struct vm_area_struct **vmas,
 | |
| 						int *locked,
 | |
| 						unsigned int flags)
 | |
| {
 | |
| 	long ret, pages_done;
 | |
| 	bool lock_dropped;
 | |
| 
 | |
| 	if (locked) {
 | |
| 		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
 | |
| 		BUG_ON(vmas);
 | |
| 		/* check caller initialized locked */
 | |
| 		BUG_ON(*locked != 1);
 | |
| 	}
 | |
| 
 | |
| 	if (pages)
 | |
| 		flags |= FOLL_GET;
 | |
| 
 | |
| 	pages_done = 0;
 | |
| 	lock_dropped = false;
 | |
| 	for (;;) {
 | |
| 		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
 | |
| 				       vmas, locked);
 | |
| 		if (!locked)
 | |
| 			/* VM_FAULT_RETRY couldn't trigger, bypass */
 | |
| 			return ret;
 | |
| 
 | |
| 		/* VM_FAULT_RETRY cannot return errors */
 | |
| 		if (!*locked) {
 | |
| 			BUG_ON(ret < 0);
 | |
| 			BUG_ON(ret >= nr_pages);
 | |
| 		}
 | |
| 
 | |
| 		if (!pages)
 | |
| 			/* If it's a prefault don't insist harder */
 | |
| 			return ret;
 | |
| 
 | |
| 		if (ret > 0) {
 | |
| 			nr_pages -= ret;
 | |
| 			pages_done += ret;
 | |
| 			if (!nr_pages)
 | |
| 				break;
 | |
| 		}
 | |
| 		if (*locked) {
 | |
| 			/*
 | |
| 			 * VM_FAULT_RETRY didn't trigger or it was a
 | |
| 			 * FOLL_NOWAIT.
 | |
| 			 */
 | |
| 			if (!pages_done)
 | |
| 				pages_done = ret;
 | |
| 			break;
 | |
| 		}
 | |
| 		/* VM_FAULT_RETRY triggered, so seek to the faulting offset */
 | |
| 		pages += ret;
 | |
| 		start += ret << PAGE_SHIFT;
 | |
| 
 | |
| 		/*
 | |
| 		 * Repeat on the address that fired VM_FAULT_RETRY
 | |
| 		 * without FAULT_FLAG_ALLOW_RETRY but with
 | |
| 		 * FAULT_FLAG_TRIED.
 | |
| 		 */
 | |
| 		*locked = 1;
 | |
| 		lock_dropped = true;
 | |
| 		down_read(&mm->mmap_sem);
 | |
| 		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
 | |
| 				       pages, NULL, NULL);
 | |
| 		if (ret != 1) {
 | |
| 			BUG_ON(ret > 1);
 | |
| 			if (!pages_done)
 | |
| 				pages_done = ret;
 | |
| 			break;
 | |
| 		}
 | |
| 		nr_pages--;
 | |
| 		pages_done++;
 | |
| 		if (!nr_pages)
 | |
| 			break;
 | |
| 		pages++;
 | |
| 		start += PAGE_SIZE;
 | |
| 	}
 | |
| 	if (lock_dropped && *locked) {
 | |
| 		/*
 | |
| 		 * We must let the caller know we temporarily dropped the lock
 | |
| 		 * and so the critical section protected by it was lost.
 | |
| 		 */
 | |
| 		up_read(&mm->mmap_sem);
 | |
| 		*locked = 0;
 | |
| 	}
 | |
| 	return pages_done;
 | |
| }
 | |
| 
 | |
| /*
 | |
|  * We can leverage the VM_FAULT_RETRY functionality in the page fault
 | |
|  * paths better by using either get_user_pages_locked() or
 | |
|  * get_user_pages_unlocked().
 | |
|  *
 | |
|  * get_user_pages_locked() is suitable to replace the form:
 | |
|  *
 | |
|  *      down_read(&mm->mmap_sem);
 | |
|  *      do_something()
 | |
|  *      get_user_pages(tsk, mm, ..., pages, NULL);
 | |
|  *      up_read(&mm->mmap_sem);
 | |
|  *
 | |
|  *  to:
 | |
|  *
 | |
|  *      int locked = 1;
 | |
|  *      down_read(&mm->mmap_sem);
 | |
|  *      do_something()
 | |
|  *      get_user_pages_locked(tsk, mm, ..., pages, &locked);
 | |
|  *      if (locked)
 | |
|  *          up_read(&mm->mmap_sem);
 | |
|  */
 | |
| long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
 | |
| 			   unsigned int gup_flags, struct page **pages,
 | |
| 			   int *locked)
 | |
| {
 | |
| 	return __get_user_pages_locked(current, current->mm, start, nr_pages,
 | |
| 				       pages, NULL, locked,
 | |
| 				       gup_flags | FOLL_TOUCH);
 | |
| }
 | |
| EXPORT_SYMBOL(get_user_pages_locked);
 | |
| 
 | |
| /*
 | |
|  * get_user_pages_unlocked() is suitable to replace the form:
 | |
|  *
 | |
|  *      down_read(&mm->mmap_sem);
 | |
|  *      get_user_pages(tsk, mm, ..., pages, NULL);
 | |
|  *      up_read(&mm->mmap_sem);
 | |
|  *
 | |
|  *  with:
 | |
|  *
 | |
|  *      get_user_pages_unlocked(tsk, mm, ..., pages);
 | |
|  *
 | |
|  * It is functionally equivalent to get_user_pages_fast so
 | |
|  * get_user_pages_fast should be used instead if specific gup_flags
 | |
|  * (e.g. FOLL_FORCE) are not required.
 | |
|  */
 | |
| long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 | |
| 			     struct page **pages, unsigned int gup_flags)
 | |
| {
 | |
| 	struct mm_struct *mm = current->mm;
 | |
| 	int locked = 1;
 | |
| 	long ret;
 | |
| 
 | |
| 	down_read(&mm->mmap_sem);
 | |
| 	ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL,
 | |
| 				      &locked, gup_flags | FOLL_TOUCH);
 | |
| 	if (locked)
 | |
| 		up_read(&mm->mmap_sem);
 | |
| 	return ret;
 | |
| }
 | |
| EXPORT_SYMBOL(get_user_pages_unlocked);
 | |
| 
 | |
| /*
 | |
|  * get_user_pages_remote() - pin user pages in memory
 | |
|  * @tsk:	the task_struct to use for page fault accounting, or
 | |
|  *		NULL if faults are not to be recorded.
 | |
|  * @mm:		mm_struct of target mm
 | |
|  * @start:	starting user address
 | |
|  * @nr_pages:	number of pages from start to pin
 | |
|  * @gup_flags:	flags modifying lookup behaviour
 | |
|  * @pages:	array that receives pointers to the pages pinned.
 | |
|  *		Should be at least nr_pages long. Or NULL, if caller
 | |
|  *		only intends to ensure the pages are faulted in.
 | |
|  * @vmas:	array of pointers to vmas corresponding to each page.
 | |
|  *		Or NULL if the caller does not require them.
 | |
|  * @locked:	pointer to lock flag indicating whether lock is held and
 | |
|  *		subsequently whether VM_FAULT_RETRY functionality can be
 | |
|  *		utilised. Lock must initially be held.
 | |
|  *
 | |
|  * Returns number of pages pinned. This may be fewer than the number
 | |
|  * requested. If nr_pages is 0 or negative, returns 0. If no pages
 | |
|  * were pinned, returns -errno. Each page returned must be released
 | |
|  * with a put_page() call when it is finished with. vmas will only
 | |
|  * remain valid while mmap_sem is held.
 | |
|  *
 | |
|  * Must be called with mmap_sem held for read or write.
 | |
|  *
 | |
|  * get_user_pages walks a process's page tables and takes a reference to
 | |
|  * each struct page that each user address corresponds to at a given
 | |
|  * instant. That is, it takes the page that would be accessed if a user
 | |
|  * thread accesses the given user virtual address at that instant.
 | |
|  *
 | |
|  * This does not guarantee that the page exists in the user mappings when
 | |
|  * get_user_pages returns, and there may even be a completely different
 | |
|  * page there in some cases (eg. if mmapped pagecache has been invalidated
 | |
|  * and subsequently re faulted). However it does guarantee that the page
 | |
|  * won't be freed completely. And mostly callers simply care that the page
 | |
|  * contains data that was valid *at some point in time*. Typically, an IO
 | |
|  * or similar operation cannot guarantee anything stronger anyway because
 | |
|  * locks can't be held over the syscall boundary.
 | |
|  *
 | |
|  * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
 | |
|  * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
 | |
|  * be called after the page is finished with, and before put_page is called.
 | |
|  *
 | |
|  * get_user_pages is typically used for fewer-copy IO operations, to get a
 | |
|  * handle on the memory by some means other than accesses via the user virtual
 | |
|  * addresses. The pages may be submitted for DMA to devices or accessed via
 | |
|  * their kernel linear mapping (via the kmap APIs). Care should be taken to
 | |
|  * use the correct cache flushing APIs.
 | |
|  *
 | |
|  * See also get_user_pages_fast, for performance critical applications.
 | |
|  *
 | |
|  * get_user_pages should be phased out in favor of
 | |
|  * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
 | |
|  * should use get_user_pages because it cannot pass
 | |
|  * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
 | |
|  */
 | |
| long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
 | |
| 		unsigned long start, unsigned long nr_pages,
 | |
| 		unsigned int gup_flags, struct page **pages,
 | |
| 		struct vm_area_struct **vmas, int *locked)
 | |
| {
 | |
| 	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
 | |
| 				       locked,
 | |
| 				       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
 | |
| }
 | |
| EXPORT_SYMBOL(get_user_pages_remote);
 | |
| 
 | |
| /*
 | |
|  * This is the same as get_user_pages_remote(), just with a
 | |
|  * less-flexible calling convention where we assume that the task
 | |
|  * and mm being operated on are the current task's and don't allow
 | |
|  * passing of a locked parameter.  We also obviously don't pass
 | |
|  * FOLL_REMOTE in here.
 | |
|  */
 | |
| long get_user_pages(unsigned long start, unsigned long nr_pages,
 | |
| 		unsigned int gup_flags, struct page **pages,
 | |
| 		struct vm_area_struct **vmas)
 | |
| {
 | |
| 	return __get_user_pages_locked(current, current->mm, start, nr_pages,
 | |
| 				       pages, vmas, NULL,
 | |
| 				       gup_flags | FOLL_TOUCH);
 | |
| }
 | |
| EXPORT_SYMBOL(get_user_pages);
 | |
| 
 | |
| #ifdef CONFIG_FS_DAX
 | |
| /*
 | |
|  * This is the same as get_user_pages() in that it assumes we are
 | |
|  * operating on the current task's mm, but it goes further to validate
 | |
|  * that the vmas associated with the address range are suitable for
 | |
|  * longterm elevated page reference counts. For example, filesystem-dax
 | |
|  * mappings are subject to the lifetime enforced by the filesystem and
 | |
|  * we need guarantees that longterm users like RDMA and V4L2 only
 | |
|  * establish mappings that have a kernel enforced revocation mechanism.
 | |
|  *
 | |
|  * "longterm" == userspace controlled elevated page count lifetime.
 | |
|  * Contrast this to iov_iter_get_pages() usages which are transient.
 | |
|  */
 | |
| long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
 | |
| 		unsigned int gup_flags, struct page **pages,
 | |
| 		struct vm_area_struct **vmas_arg)
 | |
| {
 | |
| 	struct vm_area_struct **vmas = vmas_arg;
 | |
| 	struct vm_area_struct *vma_prev = NULL;
 | |
| 	long rc, i;
 | |
| 
 | |
| 	if (!pages)
 | |
| 		return -EINVAL;
 | |
| 
 | |
| 	if (!vmas) {
 | |
| 		vmas = kcalloc(nr_pages, sizeof(struct vm_area_struct *),
 | |
| 			       GFP_KERNEL);
 | |
| 		if (!vmas)
 | |
| 			return -ENOMEM;
 | |
| 	}
 | |
| 
 | |
| 	rc = get_user_pages(start, nr_pages, gup_flags, pages, vmas);
 | |
| 
 | |
| 	for (i = 0; i < rc; i++) {
 | |
| 		struct vm_area_struct *vma = vmas[i];
 | |
| 
 | |
| 		if (vma == vma_prev)
 | |
| 			continue;
 | |
| 
 | |
| 		vma_prev = vma;
 | |
| 
 | |
| 		if (vma_is_fsdax(vma))
 | |
| 			break;
 | |
| 	}
 | |
| 
 | |
| 	/*
 | |
| 	 * Either get_user_pages() failed, or the vma validation
 | |
| 	 * succeeded, in either case we don't need to put_page() before
 | |
| 	 * returning.
 | |
| 	 */
 | |
| 	if (i >= rc)
 | |
| 		goto out;
 | |
| 
 | |
| 	for (i = 0; i < rc; i++)
 | |
| 		put_page(pages[i]);
 | |
| 	rc = -EOPNOTSUPP;
 | |
| out:
 | |
| 	if (vmas != vmas_arg)
 | |
| 		kfree(vmas);
 | |
| 	return rc;
 | |
| }
 | |
| EXPORT_SYMBOL(get_user_pages_longterm);
 | |
| #endif /* CONFIG_FS_DAX */
 | |
| 
 | |
| /**
 | |
|  * populate_vma_page_range() -  populate a range of pages in the vma.
 | |
|  * @vma:   target vma
 | |
|  * @start: start address
 | |
|  * @end:   end address
 | |
|  * @nonblocking:
 | |
|  *
 | |
|  * This takes care of mlocking the pages too if VM_LOCKED is set.
 | |
|  *
 | |
|  * return 0 on success, negative error code on error.
 | |
|  *
 | |
|  * vma->vm_mm->mmap_sem must be held.
 | |
|  *
 | |
|  * If @nonblocking is NULL, it may be held for read or write and will
 | |
|  * be unperturbed.
 | |
|  *
 | |
|  * If @nonblocking is non-NULL, it must held for read only and may be
 | |
|  * released.  If it's released, *@nonblocking will be set to 0.
 | |
|  */
 | |
| long populate_vma_page_range(struct vm_area_struct *vma,
 | |
| 		unsigned long start, unsigned long end, int *nonblocking)
 | |
| {
 | |
| 	struct mm_struct *mm = vma->vm_mm;
 | |
| 	unsigned long nr_pages = (end - start) / PAGE_SIZE;
 | |
| 	int gup_flags;
 | |
| 
 | |
| 	VM_BUG_ON(start & ~PAGE_MASK);
 | |
| 	VM_BUG_ON(end   & ~PAGE_MASK);
 | |
| 	VM_BUG_ON_VMA(start < vma->vm_start, vma);
 | |
| 	VM_BUG_ON_VMA(end   > vma->vm_end, vma);
 | |
| 	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
 | |
| 
 | |
| 	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
 | |
| 	if (vma->vm_flags & VM_LOCKONFAULT)
 | |
| 		gup_flags &= ~FOLL_POPULATE;
 | |
| 	/*
 | |
| 	 * We want to touch writable mappings with a write fault in order
 | |
| 	 * to break COW, except for shared mappings because these don't COW
 | |
| 	 * and we would not want to dirty them for nothing.
 | |
| 	 */
 | |
| 	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
 | |
| 		gup_flags |= FOLL_WRITE;
 | |
| 
 | |
| 	/*
 | |
| 	 * We want mlock to succeed for regions that have any permissions
 | |
| 	 * other than PROT_NONE.
 | |
| 	 */
 | |
| 	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
 | |
| 		gup_flags |= FOLL_FORCE;
 | |
| 
 | |
| 	/*
 | |
| 	 * We made sure addr is within a VMA, so the following will
 | |
| 	 * not result in a stack expansion that recurses back here.
 | |
| 	 */
 | |
| 	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
 | |
| 				NULL, NULL, nonblocking);
 | |
| }
 | |
| 
 | |
| /*
 | |
|  * __mm_populate - populate and/or mlock pages within a range of address space.
 | |
|  *
 | |
|  * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
 | |
|  * flags. VMAs must be already marked with the desired vm_flags, and
 | |
|  * mmap_sem must not be held.
 | |
|  */
 | |
| int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
 | |
| {
 | |
| 	struct mm_struct *mm = current->mm;
 | |
| 	unsigned long end, nstart, nend;
 | |
| 	struct vm_area_struct *vma = NULL;
 | |
| 	int locked = 0;
 | |
| 	long ret = 0;
 | |
| 
 | |
| 	end = start + len;
 | |
| 
 | |
| 	for (nstart = start; nstart < end; nstart = nend) {
 | |
| 		/*
 | |
| 		 * We want to fault in pages for [nstart; end) address range.
 | |
| 		 * Find first corresponding VMA.
 | |
| 		 */
 | |
| 		if (!locked) {
 | |
| 			locked = 1;
 | |
| 			down_read(&mm->mmap_sem);
 | |
| 			vma = find_vma(mm, nstart);
 | |
| 		} else if (nstart >= vma->vm_end)
 | |
| 			vma = vma->vm_next;
 | |
| 		if (!vma || vma->vm_start >= end)
 | |
| 			break;
 | |
| 		/*
 | |
| 		 * Set [nstart; nend) to intersection of desired address
 | |
| 		 * range with the first VMA. Also, skip undesirable VMA types.
 | |
| 		 */
 | |
| 		nend = min(end, vma->vm_end);
 | |
| 		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
 | |
| 			continue;
 | |
| 		if (nstart < vma->vm_start)
 | |
| 			nstart = vma->vm_start;
 | |
| 		/*
 | |
| 		 * Now fault in a range of pages. populate_vma_page_range()
 | |
| 		 * double checks the vma flags, so that it won't mlock pages
 | |
| 		 * if the vma was already munlocked.
 | |
| 		 */
 | |
| 		ret = populate_vma_page_range(vma, nstart, nend, &locked);
 | |
| 		if (ret < 0) {
 | |
| 			if (ignore_errors) {
 | |
| 				ret = 0;
 | |
| 				continue;	/* continue at next VMA */
 | |
| 			}
 | |
| 			break;
 | |
| 		}
 | |
| 		nend = nstart + ret * PAGE_SIZE;
 | |
| 		ret = 0;
 | |
| 	}
 | |
| 	if (locked)
 | |
| 		up_read(&mm->mmap_sem);
 | |
| 	return ret;	/* 0 or negative error code */
 | |
| }
 | |
| 
 | |
| /**
 | |
|  * get_dump_page() - pin user page in memory while writing it to core dump
 | |
|  * @addr: user address
 | |
|  *
 | |
|  * Returns struct page pointer of user page pinned for dump,
 | |
|  * to be freed afterwards by put_page().
 | |
|  *
 | |
|  * Returns NULL on any kind of failure - a hole must then be inserted into
 | |
|  * the corefile, to preserve alignment with its headers; and also returns
 | |
|  * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 | |
|  * allowing a hole to be left in the corefile to save diskspace.
 | |
|  *
 | |
|  * Called without mmap_sem, but after all other threads have been killed.
 | |
|  */
 | |
| #ifdef CONFIG_ELF_CORE
 | |
| struct page *get_dump_page(unsigned long addr)
 | |
| {
 | |
| 	struct vm_area_struct *vma;
 | |
| 	struct page *page;
 | |
| 
 | |
| 	if (__get_user_pages(current, current->mm, addr, 1,
 | |
| 			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
 | |
| 			     NULL) < 1)
 | |
| 		return NULL;
 | |
| 	flush_cache_page(vma, addr, page_to_pfn(page));
 | |
| 	return page;
 | |
| }
 | |
| #endif /* CONFIG_ELF_CORE */
 | |
| 
 | |
| /*
 | |
|  * Generic Fast GUP
 | |
|  *
 | |
|  * get_user_pages_fast attempts to pin user pages by walking the page
 | |
|  * tables directly and avoids taking locks. Thus the walker needs to be
 | |
|  * protected from page table pages being freed from under it, and should
 | |
|  * block any THP splits.
 | |
|  *
 | |
|  * One way to achieve this is to have the walker disable interrupts, and
 | |
|  * rely on IPIs from the TLB flushing code blocking before the page table
 | |
|  * pages are freed. This is unsuitable for architectures that do not need
 | |
|  * to broadcast an IPI when invalidating TLBs.
 | |
|  *
 | |
|  * Another way to achieve this is to batch up page table containing pages
 | |
|  * belonging to more than one mm_user, then rcu_sched a callback to free those
 | |
|  * pages. Disabling interrupts will allow the fast_gup walker to both block
 | |
|  * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
 | |
|  * (which is a relatively rare event). The code below adopts this strategy.
 | |
|  *
 | |
|  * Before activating this code, please be aware that the following assumptions
 | |
|  * are currently made:
 | |
|  *
 | |
|  *  *) Either HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
 | |
|  *  free pages containing page tables or TLB flushing requires IPI broadcast.
 | |
|  *
 | |
|  *  *) ptes can be read atomically by the architecture.
 | |
|  *
 | |
|  *  *) access_ok is sufficient to validate userspace address ranges.
 | |
|  *
 | |
|  * The last two assumptions can be relaxed by the addition of helper functions.
 | |
|  *
 | |
|  * This code is based heavily on the PowerPC implementation by Nick Piggin.
 | |
|  */
 | |
| #ifdef CONFIG_HAVE_GENERIC_GUP
 | |
| 
 | |
| #ifndef gup_get_pte
 | |
| /*
 | |
|  * We assume that the PTE can be read atomically. If this is not the case for
 | |
|  * your architecture, please provide the helper.
 | |
|  */
 | |
| static inline pte_t gup_get_pte(pte_t *ptep)
 | |
| {
 | |
| 	return READ_ONCE(*ptep);
 | |
| }
 | |
| #endif
 | |
| 
 | |
| static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
 | |
| {
 | |
| 	while ((*nr) - nr_start) {
 | |
| 		struct page *page = pages[--(*nr)];
 | |
| 
 | |
| 		ClearPageReferenced(page);
 | |
| 		put_page(page);
 | |
| 	}
 | |
| }
 | |
| 
 | |
| #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
 | |
| static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 | |
| 			 int write, struct page **pages, int *nr)
 | |
| {
 | |
| 	struct dev_pagemap *pgmap = NULL;
 | |
| 	int nr_start = *nr, ret = 0;
 | |
| 	pte_t *ptep, *ptem;
 | |
| 
 | |
| 	ptem = ptep = pte_offset_map(&pmd, addr);
 | |
| 	do {
 | |
| 		pte_t pte = gup_get_pte(ptep);
 | |
| 		struct page *head, *page;
 | |
| 
 | |
| 		/*
 | |
| 		 * Similar to the PMD case below, NUMA hinting must take slow
 | |
| 		 * path using the pte_protnone check.
 | |
| 		 */
 | |
| 		if (pte_protnone(pte))
 | |
| 			goto pte_unmap;
 | |
| 
 | |
| 		if (!pte_access_permitted(pte, write))
 | |
| 			goto pte_unmap;
 | |
| 
 | |
| 		if (pte_devmap(pte)) {
 | |
| 			pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
 | |
| 			if (unlikely(!pgmap)) {
 | |
| 				undo_dev_pagemap(nr, nr_start, pages);
 | |
| 				goto pte_unmap;
 | |
| 			}
 | |
| 		} else if (pte_special(pte))
 | |
| 			goto pte_unmap;
 | |
| 
 | |
| 		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
 | |
| 		page = pte_page(pte);
 | |
| 		head = compound_head(page);
 | |
| 
 | |
| 		if (!page_cache_get_speculative(head))
 | |
| 			goto pte_unmap;
 | |
| 
 | |
| 		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
 | |
| 			put_page(head);
 | |
| 			goto pte_unmap;
 | |
| 		}
 | |
| 
 | |
| 		VM_BUG_ON_PAGE(compound_head(page) != head, page);
 | |
| 
 | |
| 		SetPageReferenced(page);
 | |
| 		pages[*nr] = page;
 | |
| 		(*nr)++;
 | |
| 
 | |
| 	} while (ptep++, addr += PAGE_SIZE, addr != end);
 | |
| 
 | |
| 	ret = 1;
 | |
| 
 | |
| pte_unmap:
 | |
| 	if (pgmap)
 | |
| 		put_dev_pagemap(pgmap);
 | |
| 	pte_unmap(ptem);
 | |
| 	return ret;
 | |
| }
 | |
| #else
 | |
| 
 | |
| /*
 | |
|  * If we can't determine whether or not a pte is special, then fail immediately
 | |
|  * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
 | |
|  * to be special.
 | |
|  *
 | |
|  * For a futex to be placed on a THP tail page, get_futex_key requires a
 | |
|  * __get_user_pages_fast implementation that can pin pages. Thus it's still
 | |
|  * useful to have gup_huge_pmd even if we can't operate on ptes.
 | |
|  */
 | |
| static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 | |
| 			 int write, struct page **pages, int *nr)
 | |
| {
 | |
| 	return 0;
 | |
| }
 | |
| #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
 | |
| 
 | |
| #if defined(__HAVE_ARCH_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
 | |
| static int __gup_device_huge(unsigned long pfn, unsigned long addr,
 | |
| 		unsigned long end, struct page **pages, int *nr)
 | |
| {
 | |
| 	int nr_start = *nr;
 | |
| 	struct dev_pagemap *pgmap = NULL;
 | |
| 
 | |
| 	do {
 | |
| 		struct page *page = pfn_to_page(pfn);
 | |
| 
 | |
| 		pgmap = get_dev_pagemap(pfn, pgmap);
 | |
| 		if (unlikely(!pgmap)) {
 | |
| 			undo_dev_pagemap(nr, nr_start, pages);
 | |
| 			return 0;
 | |
| 		}
 | |
| 		SetPageReferenced(page);
 | |
| 		pages[*nr] = page;
 | |
| 		get_page(page);
 | |
| 		(*nr)++;
 | |
| 		pfn++;
 | |
| 	} while (addr += PAGE_SIZE, addr != end);
 | |
| 
 | |
| 	if (pgmap)
 | |
| 		put_dev_pagemap(pgmap);
 | |
| 	return 1;
 | |
| }
 | |
| 
 | |
| static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
 | |
| 		unsigned long end, struct page **pages, int *nr)
 | |
| {
 | |
| 	unsigned long fault_pfn;
 | |
| 	int nr_start = *nr;
 | |
| 
 | |
| 	fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
 | |
| 	if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
 | |
| 		return 0;
 | |
| 
 | |
| 	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
 | |
| 		undo_dev_pagemap(nr, nr_start, pages);
 | |
| 		return 0;
 | |
| 	}
 | |
| 	return 1;
 | |
| }
 | |
| 
 | |
| static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
 | |
| 		unsigned long end, struct page **pages, int *nr)
 | |
| {
 | |
| 	unsigned long fault_pfn;
 | |
| 	int nr_start = *nr;
 | |
| 
 | |
| 	fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
 | |
| 	if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
 | |
| 		return 0;
 | |
| 
 | |
| 	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
 | |
| 		undo_dev_pagemap(nr, nr_start, pages);
 | |
| 		return 0;
 | |
| 	}
 | |
| 	return 1;
 | |
| }
 | |
| #else
 | |
| static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
 | |
| 		unsigned long end, struct page **pages, int *nr)
 | |
| {
 | |
| 	BUILD_BUG();
 | |
| 	return 0;
 | |
| }
 | |
| 
 | |
| static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
 | |
| 		unsigned long end, struct page **pages, int *nr)
 | |
| {
 | |
| 	BUILD_BUG();
 | |
| 	return 0;
 | |
| }
 | |
| #endif
 | |
| 
 | |
| static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
 | |
| 		unsigned long end, int write, struct page **pages, int *nr)
 | |
| {
 | |
| 	struct page *head, *page;
 | |
| 	int refs;
 | |
| 
 | |
| 	if (!pmd_access_permitted(orig, write))
 | |
| 		return 0;
 | |
| 
 | |
| 	if (pmd_devmap(orig))
 | |
| 		return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr);
 | |
| 
 | |
| 	refs = 0;
 | |
| 	page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
 | |
| 	do {
 | |
| 		pages[*nr] = page;
 | |
| 		(*nr)++;
 | |
| 		page++;
 | |
| 		refs++;
 | |
| 	} while (addr += PAGE_SIZE, addr != end);
 | |
| 
 | |
| 	head = compound_head(pmd_page(orig));
 | |
| 	if (!page_cache_add_speculative(head, refs)) {
 | |
| 		*nr -= refs;
 | |
| 		return 0;
 | |
| 	}
 | |
| 
 | |
| 	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
 | |
| 		*nr -= refs;
 | |
| 		while (refs--)
 | |
| 			put_page(head);
 | |
| 		return 0;
 | |
| 	}
 | |
| 
 | |
| 	SetPageReferenced(head);
 | |
| 	return 1;
 | |
| }
 | |
| 
 | |
static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	int refs;

	if (!pud_access_permitted(orig, write))
		return 0;

	if (pud_devmap(orig))
		return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr);

	refs = 0;
	page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	do {
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	head = compound_head(pud_page(orig));
	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	SetPageReferenced(head);
	return 1;
}

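/* As gup_huge_pmd(), but for a huge pgd mapping; devmap pgds do not exist. */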
static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
			unsigned long end, int write,
			struct page **pages, int *nr)
{
	int refs;
	struct page *head, *page;

	if (!pgd_access_permitted(orig, write))
		return 0;

	BUILD_BUG_ON(pgd_devmap(orig));
	refs = 0;
	page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
	do {
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	head = compound_head(pgd_page(orig));
	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	SetPageReferenced(head);
	return 1;
}

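/*
 * Walk the pmds covering [addr, end), handing huge entries to the
 * helpers above and normal entries to gup_pte_range().  Returns 1 if
 * the whole range was handled, 0 if the caller must fall back to the
 * slow path.
 */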
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);
		if (!pmd_present(pmd))
			return 0;

		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
			if (pmd_protnone(pmd))
				return 0;

			if (!gup_huge_pmd(pmd, pmdp, addr, next, write,
				pages, nr))
				return 0;

		} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
			/*
			 * Architectures may use a different pmd format for
			 * hugetlbfs entries than for THP entries.
			 */
			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
					 PMD_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

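/* Same walk one level up: iterate the puds covering [addr, end). */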
static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&p4d, addr);
	do {
		pud_t pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (unlikely(pud_huge(pud))) {
			if (!gup_huge_pud(pud, pudp, addr, next, write,
					  pages, nr))
				return 0;
		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
					 PUD_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

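/* Same walk at the p4d level; huge p4ds are not expected to exist. */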
static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	unsigned long next;
	p4d_t *p4dp;

	p4dp = p4d_offset(&pgd, addr);
	do {
		p4d_t p4d = READ_ONCE(*p4dp);

		next = p4d_addr_end(addr, end);
		if (p4d_none(p4d))
			return 0;
		BUILD_BUG_ON(p4d_huge(p4d));
		if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
			if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
					 P4D_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pud_range(p4d, addr, next, write, pages, nr))
			return 0;
	} while (p4dp++, addr = next, addr != end);

	return 1;
}

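/*
 * Top level of the lockless page table walk.  Unlike the lower levels
 * this returns nothing: it simply stops at the first hole or at the
 * first entry that needs the slow path, leaving *nr at the number of
 * pages pinned so far.  Callers disable interrupts around it.
 */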
static void gup_pgd_range(unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset(current->mm, addr);
	do {
		pgd_t pgd = READ_ONCE(*pgdp);

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			return;
		if (unlikely(pgd_huge(pgd))) {
			if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
					  pages, nr))
				return;
		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
					 PGDIR_SHIFT, next, write, pages, nr))
				return;
		} else if (!gup_p4d_range(pgd, addr, next, write, pages, nr))
			return;
	} while (pgdp++, addr = next, addr != end);
}

#ifndef gup_fast_permitted
/*
 * Check whether it is allowed to use __get_user_pages_fast() for the
 * range, or whether we need to fall back to the slow version:
 */
bool gup_fast_permitted(unsigned long start, int nr_pages, int write)
{
	unsigned long len, end;

	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;
	return end >= start;
}
#endif
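
/*
 * An architecture may provide its own gup_fast_permitted() (and define
 * the macro of the same name) to reject ranges that must never be
 * walked locklessly.  As a purely illustrative sketch (the
 * TASK_SIZE_MAX check is an assumption for the example, not something
 * this file requires), such an override could look like:
 *
 *	static inline bool gup_fast_permitted(unsigned long start,
 *					      int nr_pages, int write)
 *	{
 *		unsigned long len = (unsigned long)nr_pages << PAGE_SHIFT;
 *		unsigned long end = start + len;
 *
 *		return end >= start && end <= TASK_SIZE_MAX;
 *	}
 */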

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't
 * fall back to the regular GUP.
 * Note a difference from get_user_pages_fast: this always returns the
 * number of pages pinned, 0 if no pages were pinned.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	unsigned long addr, len, end;
	unsigned long flags;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
					(void __user *)start, len)))
		return 0;

	/*
	 * Disable interrupts.  We use the nested form as we can already have
	 * interrupts disabled by get_futex_key.
	 *
	 * With interrupts disabled, we block page table pages from being
	 * freed from under us. See mmu_gather_tlb in asm-generic/tlb.h
	 * for more details.
	 *
	 * We do not adopt an rcu_read_lock() here as we also want to
	 * block IPIs that come from THPs splitting.
	 */

	if (gup_fast_permitted(start, nr_pages, write)) {
		local_irq_save(flags);
		gup_pgd_range(addr, end, write, pages, &nr);
		local_irq_restore(flags);
	}

	return nr;
}

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	unsigned long addr, len, end;
	int nr = 0, ret = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (nr_pages <= 0)
		return 0;

	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
					(void __user *)start, len)))
		return -EFAULT;

	if (gup_fast_permitted(start, nr_pages, write)) {
		local_irq_disable();
		gup_pgd_range(addr, end, write, pages, &nr);
		local_irq_enable();
		ret = nr;
	}

	if (nr < nr_pages) {
		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
				write ? FOLL_WRITE : 0);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}
	}

	return ret;
}
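
/*
 * Illustrative usage from a caller that wants to pin a user buffer
 * (a sketch only; NPAGES, uaddr and the surrounding code are made up
 * for the example):
 *
 *	struct page *pages[NPAGES];
 *	int i, pinned;
 *
 *	pinned = get_user_pages_fast(uaddr, NPAGES, 1, pages);
 *	if (pinned < 0)
 *		return pinned;	(nothing was pinned, -errno)
 *
 *	(use pages[0..pinned-1]; this may be fewer than NPAGES)
 *
 *	for (i = 0; i < pinned; i++)
 *		put_page(pages[i]);
 */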

#endif /* CONFIG_HAVE_GENERIC_GUP */