diff --git a/mm/mlock.c b/mm/mlock.c
index 67b3dd8..13e81ee 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -155,13 +155,12 @@ static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long add
  * vma->vm_mm->mmap_sem must be held for at least read.
  */
 static long __mlock_vma_pages_range(struct vm_area_struct *vma,
-                                   unsigned long start, unsigned long end)
+                                   unsigned long start, unsigned long end,
+                                   int *nonblocking)
 {
        struct mm_struct *mm = vma->vm_mm;
        unsigned long addr = start;
-       struct page *pages[16]; /* 16 gives a reasonable batch */
        int nr_pages = (end - start) / PAGE_SIZE;
-       int ret = 0;
        int gup_flags;
 
        VM_BUG_ON(start & ~PAGE_MASK);
@@ -170,7 +169,7 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
        VM_BUG_ON(end   > vma->vm_end);
        VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
-       gup_flags = FOLL_TOUCH | FOLL_GET;
+       gup_flags = FOLL_TOUCH;
        /*
         * We want to touch writable mappings with a write fault in order
         * to break COW, except for shared mappings because these don't COW
@@ -179,69 +178,17 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
        if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
                gup_flags |= FOLL_WRITE;
 
+       if (vma->vm_flags & VM_LOCKED)
+               gup_flags |= FOLL_MLOCK;
+
        /* We don't try to access the guard page of a stack vma */
        if (stack_guard_page(vma, start)) {
                addr += PAGE_SIZE;
                nr_pages--;
        }
 
-       while (nr_pages > 0) {
-               int i;
-
-               cond_resched();
-
-               /*
-                * get_user_pages makes pages present if we are
-                * setting mlock. and this extra reference count will
-                * disable migration of this page.  However, page may
-                * still be truncated out from under us.
-                */
-               ret = __get_user_pages(current, mm, addr,
-                               min_t(int, nr_pages, ARRAY_SIZE(pages)),
-                               gup_flags, pages, NULL);
-               /*
-                * This can happen for, e.g., VM_NONLINEAR regions before
-                * a page has been allocated and mapped at a given offset,
-                * or for addresses that map beyond end of a file.
-                * We'll mlock the pages if/when they get faulted in.
-                */
-               if (ret < 0)
-                       break;
-
-               lru_add_drain();        /* push cached pages to LRU */
-
-               for (i = 0; i < ret; i++) {
-                       struct page *page = pages[i];
-
-                       if (page->mapping) {
-                               /*
-                                * That preliminary check is mainly to avoid
-                                * the pointless overhead of lock_page on the
-                                * ZERO_PAGE: which might bounce very badly if
-                                * there is contention.  However, we're still
-                                * dirtying its cacheline with get/put_page:
-                                * we'll add another __get_user_pages flag to
-                                * avoid it if that case turns out to matter.
-                                */
-                               lock_page(page);
-                               /*
-                                * Because we lock page here and migration is
-                                * blocked by the elevated reference, we need
-                                * only check for file-cache page truncation.
-                                */
-                               if (page->mapping)
-                                       mlock_vma_page(page);
-                               unlock_page(page);
-                       }
-                       put_page(page); /* ref from get_user_pages() */
-               }
-
-               addr += ret * PAGE_SIZE;
-               nr_pages -= ret;
-               ret = 0;
-       }
-
-       return ret;     /* 0 or negative error code */
+       return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
+                               NULL, NULL, nonblocking);
 }
 
 /*
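
The removed loop above pinned pages sixteen at a time with FOLL_GET and then locked each one by hand via lock_page()/mlock_vma_page(); after this hunk that work happens inside __get_user_pages() itself, driven by the new FOLL_MLOCK flag, and the helper reduces to flag selection plus a single call. A condensed restatement of what the helper becomes (the stack-guard-page adjustment and VM_BUG_ON checks are omitted; this is a sketch, not a drop-in copy of the file):

static long __mlock_vma_pages_range(struct vm_area_struct *vma,
                                    unsigned long start, unsigned long end,
                                    int *nonblocking)
{
        int gup_flags = FOLL_TOUCH;     /* no FOLL_GET: no page references to drop */

        /* Write-fault private writable mappings so COW is broken up front;
         * shared mappings don't COW, so there is no point dirtying them. */
        if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
                gup_flags |= FOLL_WRITE;

        /* Ask the fault path to mlock each page as it is faulted in,
         * replacing the old lock_page() + mlock_vma_page() loop here. */
        if (vma->vm_flags & VM_LOCKED)
                gup_flags |= FOLL_MLOCK;

        /* Returns the number of pages handled or a negative error;
         * *nonblocking is cleared if mmap_sem was dropped along the way. */
        return __get_user_pages(current, vma->vm_mm, start,
                                (end - start) / PAGE_SIZE, gup_flags,
                                NULL, NULL, nonblocking);
}
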
@@ -285,7 +232,7 @@ long mlock_vma_pages_range(struct vm_area_struct *vma,
                        is_vm_hugetlb_page(vma) ||
                        vma == get_gate_vma(current))) {
 
-               __mlock_vma_pages_range(vma, start, end);
+               __mlock_vma_pages_range(vma, start, end, NULL);
 
                /* Hide errors from mmap() and other callers */
                return 0;
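
Note the NULL passed for the new nonblocking argument: this mmap()-time path keeps its old contract, where the caller already holds mmap_sem across the whole operation and must not have it dropped, and any fault error is still hidden so mmap() itself succeeds. A minimal sketch of the two calling conventions (loop bookkeeping elided):

        /* mmap()/mremap() path: mmap_sem is held by the caller for the whole
         * range and must stay held, so no drop/retry bookkeeping is requested. */
        __mlock_vma_pages_range(vma, start, end, NULL);

        /* mlock()/mlockall() path (see the do_mlock_pages() hunks below):
         * `locked` is set once mmap_sem is taken and is cleared through the
         * nonblocking pointer if __get_user_pages() had to drop mmap_sem while
         * waiting on a fault, telling the loop to re-take it and re-find the vma. */
        ret = __mlock_vma_pages_range(vma, nstart, nend, &locked);
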
@@ -481,21 +428,23 @@ static int do_mlock_pages(unsigned long start, size_t len, int ignore_errors)
        struct mm_struct *mm = current->mm;
        unsigned long end, nstart, nend;
        struct vm_area_struct *vma = NULL;
+       int locked = 0;
        int ret = 0;
 
        VM_BUG_ON(start & ~PAGE_MASK);
        VM_BUG_ON(len != PAGE_ALIGN(len));
        end = start + len;
 
-       down_read(&mm->mmap_sem);
        for (nstart = start; nstart < end; nstart = nend) {
                /*
                 * We want to fault in pages for [nstart; end) address range.
                 * Find first corresponding VMA.
                 */
-               if (!vma)
+               if (!locked) {
+                       locked = 1;
+                       down_read(&mm->mmap_sem);
                        vma = find_vma(mm, nstart);
-               else
+               } else if (nstart >= vma->vm_end)
                        vma = vma->vm_next;
                if (!vma || vma->vm_start >= end)
                        break;
@@ -509,22 +458,24 @@ static int do_mlock_pages(unsigned long start, size_t len, int ignore_errors)
                if (nstart < vma->vm_start)
                        nstart = vma->vm_start;
                /*
-                * Now fault in a range of pages within the first VMA.
+                * Now fault in a range of pages. __mlock_vma_pages_range()
+                * double checks the vma flags, so that it won't mlock pages
+                * if the vma was already munlocked.
                 */
-               if (vma->vm_flags & VM_LOCKED) {
-                       ret = __mlock_vma_pages_range(vma, nstart, nend);
-                       if (ret < 0 && ignore_errors) {
+               ret = __mlock_vma_pages_range(vma, nstart, nend, &locked);
+               if (ret < 0) {
+                       if (ignore_errors) {
                                ret = 0;
                                continue;       /* continue at next VMA */
                        }
-                       if (ret) {
-                               ret = __mlock_posix_error_return(ret);
-                               break;
-                       }
-               } else
-                       make_pages_present(nstart, nend);
+                       ret = __mlock_posix_error_return(ret);
+                       break;
+               }
+               nend = nstart + ret * PAGE_SIZE;
+               ret = 0;
        }
-       up_read(&mm->mmap_sem);
+       if (locked)
+               up_read(&mm->mmap_sem);
        return ret;     /* 0 or negative error code */
 }
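
The up_read() at the end becomes conditional because the loop can finish with mmap_sem no longer held: the last __get_user_pages() call may have dropped it on a fault retry, or the loop may exit before ever taking it. For reference, this is roughly how the nonblocking pointer is consumed inside __get_user_pages() in this kernel generation; the fragment is simplified and is not part of this patch:

        /* Inside __get_user_pages() (simplified): a non-NULL nonblocking pointer
         * opts the fault in to FAULT_FLAG_ALLOW_RETRY, so the fault handler may
         * release mmap_sem instead of sleeping with it held. */
        unsigned int fault_flags = 0;

        if (gup_flags & FOLL_WRITE)
                fault_flags |= FAULT_FLAG_WRITE;
        if (nonblocking)
                fault_flags |= FAULT_FLAG_ALLOW_RETRY;

        ret = handle_mm_fault(mm, vma, start, fault_flags);

        if (ret & VM_FAULT_RETRY) {
                if (nonblocking)
                        *nonblocking = 0;       /* mmap_sem was released for us */
                return i;                       /* pages handled before the retry */
        }
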