fs/dax: remove wmb_pmem()
/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
/*
 * We use the lowest available bit in the exceptional entry for locking and
 * the other two bits to determine the entry type.  In total, three special
 * bits are used.
 */
#define RADIX_DAX_SHIFT (RADIX_TREE_EXCEPTIONAL_SHIFT + 3)
#define RADIX_DAX_PTE (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
#define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
#define RADIX_DAX_TYPE_MASK (RADIX_DAX_PTE | RADIX_DAX_PMD)
#define RADIX_DAX_TYPE(entry) ((unsigned long)entry & RADIX_DAX_TYPE_MASK)
#define RADIX_DAX_SECTOR(entry) (((unsigned long)entry >> RADIX_DAX_SHIFT))
#define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \
                RADIX_DAX_SHIFT | (pmd ? RADIX_DAX_PMD : RADIX_DAX_PTE) | \
                RADIX_TREE_EXCEPTIONAL_ENTRY))

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
        int i;

        for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
                init_waitqueue_head(wait_table + i);
        return 0;
}
fs_initcall(init_dax_wait_table);

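/*
 * Hash the (mapping, index) pair into the global wait table.  Different
 * entries may share a waitqueue; the wake function filters on the exact
 * (mapping, index) key.
 */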
static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
                                              pgoff_t index)
{
        unsigned long hash = hash_long((unsigned long)mapping ^ index,
                                       DAX_WAIT_TABLE_BITS);
        return wait_table + hash;
}

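/*
 * Map a sector range of @bdev for direct access.  We hold a queue
 * reference (blk_queue_enter()) across the mapping so the device cannot
 * be torn down while @dax->addr is in use; every successful call must be
 * paired with dax_unmap_atomic().
 */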
static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
        struct request_queue *q = bdev->bd_queue;
        long rc = -EIO;

        dax->addr = (void __pmem *) ERR_PTR(-EIO);
        if (blk_queue_enter(q, true) != 0)
                return rc;

        rc = bdev_direct_access(bdev, dax);
        if (rc < 0) {
                dax->addr = (void __pmem *) ERR_PTR(rc);
                blk_queue_exit(q);
                return rc;
        }
        return rc;
}

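/* Release the queue reference taken by a successful dax_map_atomic(). */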
static void dax_unmap_atomic(struct block_device *bdev,
                const struct blk_dax_ctl *dax)
{
        if (IS_ERR(dax->addr))
                return;
        blk_queue_exit(bdev->bd_queue);
}

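/*
 * Allocate a page and fill it with the PAGE_SIZE-aligned run of sectors
 * containing sector @n, read via direct access.  Returns the page, or an
 * ERR_PTR on allocation or mapping failure.
 */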
struct page *read_dax_sector(struct block_device *bdev, sector_t n)
{
        struct page *page = alloc_pages(GFP_KERNEL, 0);
        struct blk_dax_ctl dax = {
                .size = PAGE_SIZE,
                .sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
        };
        long rc;

        if (!page)
                return ERR_PTR(-ENOMEM);

        rc = dax_map_atomic(bdev, &dax);
        if (rc < 0) {
                __free_page(page);
                return ERR_PTR(rc);
        }
        memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
        dax_unmap_atomic(bdev, &dax);
        return page;
}

static bool buffer_written(struct buffer_head *bh)
{
        return buffer_mapped(bh) && !buffer_unwritten(bh);
}

/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size.  To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
        return bh->b_state != 0;
}

static sector_t to_sector(const struct buffer_head *bh,
                const struct inode *inode)
{
        sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);

        return sector;
}

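/*
 * The core DAX I/O loop.  Walk [start, end), calling get_block() to map
 * each file extent, then copy data directly between the iterator and the
 * device's persistent-memory mapping: copy_from_iter_pmem() for writes,
 * copy_to_iter() for reads, and iov_iter_zero() for holes.
 */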
static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
                      loff_t start, loff_t end, get_block_t get_block,
                      struct buffer_head *bh)
{
        loff_t pos = start, max = start, bh_max = start;
        bool hole = false;
        struct block_device *bdev = NULL;
        int rw = iov_iter_rw(iter), rc = 0;
        long map_len = 0;
        struct blk_dax_ctl dax = {
                .addr = (void __pmem *) ERR_PTR(-EIO),
        };
        unsigned blkbits = inode->i_blkbits;
        sector_t file_blks = (i_size_read(inode) + (1 << blkbits) - 1)
                                                                >> blkbits;

        if (rw == READ)
                end = min(end, i_size_read(inode));

        while (pos < end) {
                size_t len;
                if (pos == max) {
                        long page = pos >> PAGE_SHIFT;
                        sector_t block = page << (PAGE_SHIFT - blkbits);
                        unsigned first = pos - (block << blkbits);
                        long size;

                        if (pos == bh_max) {
                                bh->b_size = PAGE_ALIGN(end - pos);
                                bh->b_state = 0;
                                rc = get_block(inode, block, bh, rw == WRITE);
                                if (rc)
                                        break;
                                if (!buffer_size_valid(bh))
                                        bh->b_size = 1 << blkbits;
                                bh_max = pos - first + bh->b_size;
                                bdev = bh->b_bdev;
                                /*
                                 * We allow uninitialized buffers for writes
                                 * beyond EOF as those cannot race with faults
                                 */
                                WARN_ON_ONCE(
                                        (buffer_new(bh) && block < file_blks) ||
                                        (rw == WRITE && buffer_unwritten(bh)));
                        } else {
                                unsigned done = bh->b_size -
                                                (bh_max - (pos - first));
                                bh->b_blocknr += done >> blkbits;
                                bh->b_size -= done;
                        }

                        hole = rw == READ && !buffer_written(bh);
                        if (hole) {
                                size = bh->b_size - first;
                        } else {
                                dax_unmap_atomic(bdev, &dax);
                                dax.sector = to_sector(bh, inode);
                                dax.size = bh->b_size;
                                map_len = dax_map_atomic(bdev, &dax);
                                if (map_len < 0) {
                                        rc = map_len;
                                        break;
                                }
                                dax.addr += first;
                                size = map_len - first;
                        }
                        max = min(pos + size, end);
                }

                if (iov_iter_rw(iter) == WRITE) {
                        len = copy_from_iter_pmem(dax.addr, max - pos, iter);
                } else if (!hole)
                        len = copy_to_iter((void __force *) dax.addr, max - pos,
                                        iter);
                else
                        len = iov_iter_zero(max - pos, iter);

                if (!len) {
                        rc = -EFAULT;
                        break;
                }

                pos += len;
                if (!IS_ERR(dax.addr))
                        dax.addr += len;
        }

        dax_unmap_atomic(bdev, &dax);

        return (pos == start) ? rc : pos - start;
}

/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
                  struct iov_iter *iter, get_block_t get_block,
                  dio_iodone_t end_io, int flags)
{
        struct buffer_head bh;
        ssize_t retval = -EINVAL;
        loff_t pos = iocb->ki_pos;
        loff_t end = pos + iov_iter_count(iter);

        memset(&bh, 0, sizeof(bh));
        bh.b_bdev = inode->i_sb->s_bdev;

        if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
                inode_lock(inode);

        /* Protects against truncate */
        if (!(flags & DIO_SKIP_DIO_COUNT))
                inode_dio_begin(inode);

        retval = dax_io(inode, iter, pos, end, get_block, &bh);

        if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
                inode_unlock(inode);

        if (end_io) {
                int err;

                err = end_io(iocb, pos, retval, bh.b_private);
                if (err)
                        retval = err;
        }

        if (!(flags & DIO_SKIP_DIO_COUNT))
                inode_dio_end(inode);
        return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);

/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
        struct address_space *mapping;
        unsigned long index;
};

struct wait_exceptional_entry_queue {
        wait_queue_t wait;
        struct exceptional_entry_key key;
};

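/*
 * Wake function for the shared wait table: only wake waiters whose key
 * matches the (mapping, index) pair being unlocked.
 */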
static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
                                       int sync, void *keyp)
{
        struct exceptional_entry_key *key = keyp;
        struct wait_exceptional_entry_queue *ewait =
                container_of(wait, struct wait_exceptional_entry_queue, wait);

        if (key->mapping != ewait->key.mapping ||
            key->index != ewait->key.index)
                return 0;
        return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * Check whether the given slot is locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
        unsigned long entry = (unsigned long)
                radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
        return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
        unsigned long entry = (unsigned long)
                radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

        entry |= RADIX_DAX_ENTRY_LOCK;
        radix_tree_replace_slot(slot, (void *)entry);
        return (void *)entry;
}

/*
 * Mark the given slot as unlocked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
        unsigned long entry = (unsigned long)
                radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

        entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
        radix_tree_replace_slot(slot, (void *)entry);
        return (void *)entry;
}

/*
 * Look up an entry in the radix tree, wait for it to become unlocked if it
 * is an exceptional entry, and return it.  The caller must call
 * put_unlocked_mapping_entry() if it decides not to lock the entry, or
 * put_locked_mapping_entry() once it has locked the entry and later wants
 * to unlock it.
 *
 * The function must be called with mapping->tree_lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
                                        pgoff_t index, void ***slotp)
{
        void *ret, **slot;
        struct wait_exceptional_entry_queue ewait;
        wait_queue_head_t *wq = dax_entry_waitqueue(mapping, index);

        init_wait(&ewait.wait);
        ewait.wait.func = wake_exceptional_entry_func;
        ewait.key.mapping = mapping;
        ewait.key.index = index;

        for (;;) {
                ret = __radix_tree_lookup(&mapping->page_tree, index, NULL,
                                          &slot);
                if (!ret || !radix_tree_exceptional_entry(ret) ||
                    !slot_locked(mapping, slot)) {
                        if (slotp)
                                *slotp = slot;
                        return ret;
                }
                prepare_to_wait_exclusive(wq, &ewait.wait,
                                          TASK_UNINTERRUPTIBLE);
                spin_unlock_irq(&mapping->tree_lock);
                schedule();
                finish_wait(wq, &ewait.wait);
                spin_lock_irq(&mapping->tree_lock);
        }
}

/*
 * Find the radix tree entry at the given index.  If it points to a page,
 * return with the page locked.  If it points to an exceptional entry,
 * return with the radix tree entry locked.  If the radix tree doesn't
 * contain the given index, create an empty exceptional entry for the index
 * and return with it locked.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags.  For
 * persistent memory the benefit is doubtful.  We can add that later if we
 * can show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index)
{
        void *ret, **slot;

restart:
        spin_lock_irq(&mapping->tree_lock);
        ret = get_unlocked_mapping_entry(mapping, index, &slot);
        /* No entry for given index? Make sure radix tree is big enough. */
        if (!ret) {
                int err;

                spin_unlock_irq(&mapping->tree_lock);
                err = radix_tree_preload(
                                mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
                if (err)
                        return ERR_PTR(err);
                ret = (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
                               RADIX_DAX_ENTRY_LOCK);
                spin_lock_irq(&mapping->tree_lock);
                err = radix_tree_insert(&mapping->page_tree, index, ret);
                radix_tree_preload_end();
                if (err) {
                        spin_unlock_irq(&mapping->tree_lock);
                        /* Someone already created the entry? */
                        if (err == -EEXIST)
                                goto restart;
                        return ERR_PTR(err);
                }
                /* Good, we have inserted empty locked entry into the tree. */
                mapping->nrexceptional++;
                spin_unlock_irq(&mapping->tree_lock);
                return ret;
        }
        /* Normal page in radix tree? */
        if (!radix_tree_exceptional_entry(ret)) {
                struct page *page = ret;

                get_page(page);
                spin_unlock_irq(&mapping->tree_lock);
                lock_page(page);
                /* Page got truncated? Retry... */
                if (unlikely(page->mapping != mapping)) {
                        unlock_page(page);
                        put_page(page);
                        goto restart;
                }
                return page;
        }
        ret = lock_slot(mapping, slot);
        spin_unlock_irq(&mapping->tree_lock);
        return ret;
}

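/*
 * Wake up tasks waiting on a radix tree entry lock.  With @wake_all, wake
 * every waiter for the (mapping, index) pair, e.g. when the entry is being
 * deleted; otherwise wake a single exclusive waiter.
 */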
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
                                   pgoff_t index, bool wake_all)
{
        wait_queue_head_t *wq = dax_entry_waitqueue(mapping, index);

        /*
         * Checking for locked entry and prepare_to_wait_exclusive() happens
         * under mapping->tree_lock, ditto for entry handling in our callers.
         * So at this point all tasks that could have seen our entry locked
         * must be in the waitqueue and the following check will see them.
         */
        if (waitqueue_active(wq)) {
                struct exceptional_entry_key key;

                key.mapping = mapping;
                key.index = index;
                __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
        }
}

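/*
 * Clear the lock bit in the radix tree entry at @index and wake one
 * waiter.  WARNs and bails out if the entry is missing or not locked.
 */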
void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
{
        void *ret, **slot;

        spin_lock_irq(&mapping->tree_lock);
        ret = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
        if (WARN_ON_ONCE(!ret || !radix_tree_exceptional_entry(ret) ||
                         !slot_locked(mapping, slot))) {
                spin_unlock_irq(&mapping->tree_lock);
                return;
        }
        unlock_slot(mapping, slot);
        spin_unlock_irq(&mapping->tree_lock);
        dax_wake_mapping_entry_waiter(mapping, index, false);
}

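/*
 * Drop the reference we got from grab_mapping_entry(): unlock and release
 * a hole page, or unlock the exceptional radix tree entry.
 */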
static void put_locked_mapping_entry(struct address_space *mapping,
                                     pgoff_t index, void *entry)
{
        if (!radix_tree_exceptional_entry(entry)) {
                unlock_page(entry);
                put_page(entry);
        } else {
                dax_unlock_mapping_entry(mapping, index);
        }
}

/*
 * Called when we are done with a radix tree entry we looked up via
 * get_unlocked_mapping_entry() and did not lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
                                       pgoff_t index, void *entry)
{
        if (!radix_tree_exceptional_entry(entry))
                return;

        /* We have to wake up next waiter for the radix tree entry lock */
        dax_wake_mapping_entry_waiter(mapping, index, false);
}

/*
 * Delete the exceptional DAX entry at @index from @mapping.  Wait for the
 * radix tree entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
        void *entry;

        spin_lock_irq(&mapping->tree_lock);
        entry = get_unlocked_mapping_entry(mapping, index, NULL);
        /*
         * This gets called from the truncate / punch_hole path.  As such, the
         * caller must hold locks protecting against concurrent modifications
         * of the radix tree (usually fs-private i_mmap_sem for writing).
         * Since the caller has seen an exceptional entry for this index, we
         * had better find it at that index as well...
         */
        if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry))) {
                spin_unlock_irq(&mapping->tree_lock);
                return 0;
        }
        radix_tree_delete(&mapping->page_tree, index);
        mapping->nrexceptional--;
        spin_unlock_irq(&mapping->tree_lock);
        dax_wake_mapping_entry_waiter(mapping, index, true);

        return 1;
}

/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, void *entry,
                         struct vm_fault *vmf)
{
        struct page *page;

        /* Hole page already exists? Return it...  */
        if (!radix_tree_exceptional_entry(entry)) {
                vmf->page = entry;
                return VM_FAULT_LOCKED;
        }

        /* This will replace locked radix tree entry with a hole page */
        page = find_or_create_page(mapping, vmf->pgoff,
                                   vmf->gfp_mask | __GFP_ZERO);
        if (!page) {
                put_locked_mapping_entry(mapping, vmf->pgoff, entry);
                return VM_FAULT_OOM;
        }
        vmf->page = page;
        return VM_FAULT_LOCKED;
}

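/*
 * Copy one page worth of data from the block mapped by @bh into @to, for
 * a copy-on-write fault.  The block is mapped for direct access just long
 * enough to do the copy.
 */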
static int copy_user_bh(struct page *to, struct inode *inode,
                struct buffer_head *bh, unsigned long vaddr)
{
        struct blk_dax_ctl dax = {
                .sector = to_sector(bh, inode),
                .size = bh->b_size,
        };
        struct block_device *bdev = bh->b_bdev;
        void *vto;

        if (dax_map_atomic(bdev, &dax) < 0)
                return PTR_ERR(dax.addr);
        vto = kmap_atomic(to);
        copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
        kunmap_atomic(vto);
        dax_unmap_atomic(bdev, &dax);
        return 0;
}

#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_SHIFT))

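/*
 * Install a locked exceptional entry for @sector at vmf->pgoff, replacing
 * either a hole page (which is unmapped and removed from the page cache)
 * or an existing exceptional entry.  For write faults, the inode is marked
 * dirty and the entry is tagged PAGECACHE_TAG_DIRTY.  Returns the new
 * entry or an ERR_PTR.
 */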
static void *dax_insert_mapping_entry(struct address_space *mapping,
                                      struct vm_fault *vmf,
                                      void *entry, sector_t sector)
{
        struct radix_tree_root *page_tree = &mapping->page_tree;
        int error = 0;
        bool hole_fill = false;
        void *new_entry;
        pgoff_t index = vmf->pgoff;

        if (vmf->flags & FAULT_FLAG_WRITE)
                __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

        /* Replacing hole page with block mapping? */
        if (!radix_tree_exceptional_entry(entry)) {
                hole_fill = true;
                /*
                 * Unmap the page now before we remove it from page cache below.
                 * The page is locked so it cannot be faulted in again.
                 */
                unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
                                    PAGE_SIZE, 0);
                error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
                if (error)
                        return ERR_PTR(error);
        }

        spin_lock_irq(&mapping->tree_lock);
        new_entry = (void *)((unsigned long)RADIX_DAX_ENTRY(sector, false) |
                       RADIX_DAX_ENTRY_LOCK);
        if (hole_fill) {
                __delete_from_page_cache(entry, NULL);
                /* Drop pagecache reference */
                put_page(entry);
                error = radix_tree_insert(page_tree, index, new_entry);
                if (error) {
                        new_entry = ERR_PTR(error);
                        goto unlock;
                }
                mapping->nrexceptional++;
        } else {
                void **slot;
                void *ret;

                ret = __radix_tree_lookup(page_tree, index, NULL, &slot);
                WARN_ON_ONCE(ret != entry);
                radix_tree_replace_slot(slot, new_entry);
        }
        if (vmf->flags & FAULT_FLAG_WRITE)
                radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
 unlock:
        spin_unlock_irq(&mapping->tree_lock);
        if (hole_fill) {
                radix_tree_preload_end();
                /*
                 * We don't need the hole page anymore; it has been replaced
                 * with a locked radix tree entry now.
                 */
                if (mapping->a_ops->freepage)
                        mapping->a_ops->freepage(entry);
                unlock_page(entry);
                put_page(entry);
        }
        return new_entry;
}

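/*
 * Flush the processor cache for one radix tree entry: revalidate the
 * entry under tree_lock, map its sector range for direct access, write
 * the range back with wb_cache_pmem() and clear the TOWRITE tag.
 */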
static int dax_writeback_one(struct block_device *bdev,
                struct address_space *mapping, pgoff_t index, void *entry)
{
        struct radix_tree_root *page_tree = &mapping->page_tree;
        int type = RADIX_DAX_TYPE(entry);
        struct radix_tree_node *node;
        struct blk_dax_ctl dax;
        void **slot;
        int ret = 0;

        spin_lock_irq(&mapping->tree_lock);
        /*
         * Regular page slots are stabilized by the page lock even
         * without the tree itself locked.  These unlocked entries
         * need verification under the tree lock.
         */
        if (!__radix_tree_lookup(page_tree, index, &node, &slot))
                goto unlock;
        if (*slot != entry)
                goto unlock;

        /* another fsync thread may have already written back this entry */
        if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
                goto unlock;

        if (WARN_ON_ONCE(type != RADIX_DAX_PTE && type != RADIX_DAX_PMD)) {
                ret = -EIO;
                goto unlock;
        }

        dax.sector = RADIX_DAX_SECTOR(entry);
        dax.size = (type == RADIX_DAX_PMD ? PMD_SIZE : PAGE_SIZE);
        spin_unlock_irq(&mapping->tree_lock);

        /*
         * We cannot hold tree_lock while calling dax_map_atomic() because it
         * eventually calls cond_resched().
         */
        ret = dax_map_atomic(bdev, &dax);
        if (ret < 0)
                return ret;

        if (WARN_ON_ONCE(ret < dax.size)) {
                ret = -EIO;
                goto unmap;
        }

        wb_cache_pmem(dax.addr, dax.size);

        spin_lock_irq(&mapping->tree_lock);
        radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
        spin_unlock_irq(&mapping->tree_lock);
 unmap:
        dax_unmap_atomic(bdev, &dax);
        return ret;

 unlock:
        spin_unlock_irq(&mapping->tree_lock);
        return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
                struct block_device *bdev, struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        pgoff_t start_index, end_index, pmd_index;
        pgoff_t indices[PAGEVEC_SIZE];
        struct pagevec pvec;
        bool done = false;
        int i, ret = 0;
        void *entry;

        if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
                return -EIO;

        if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
                return 0;

        start_index = wbc->range_start >> PAGE_SHIFT;
        end_index = wbc->range_end >> PAGE_SHIFT;
        pmd_index = DAX_PMD_INDEX(start_index);

        rcu_read_lock();
        entry = radix_tree_lookup(&mapping->page_tree, pmd_index);
        rcu_read_unlock();

        /* see if the start of our range is covered by a PMD entry */
        if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD)
                start_index = pmd_index;

        tag_pages_for_writeback(mapping, start_index, end_index);

        pagevec_init(&pvec, 0);
        while (!done) {
                pvec.nr = find_get_entries_tag(mapping, start_index,
                                PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
                                pvec.pages, indices);

                if (pvec.nr == 0)
                        break;

                for (i = 0; i < pvec.nr; i++) {
                        if (indices[i] > end_index) {
                                done = true;
                                break;
                        }

                        ret = dax_writeback_one(bdev, mapping, indices[i],
                                        pvec.pages[i]);
                        if (ret < 0)
                                return ret;
                }
        }
        return 0;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

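/*
 * Insert a PTE for the block mapped by @bh: translate the block to a pfn
 * via dax_map_atomic(), record the sector in a locked radix tree entry,
 * then map the pfn into the process with vm_insert_mixed().
 */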
static int dax_insert_mapping(struct address_space *mapping,
                        struct buffer_head *bh, void **entryp,
                        struct vm_area_struct *vma, struct vm_fault *vmf)
{
        unsigned long vaddr = (unsigned long)vmf->virtual_address;
        struct block_device *bdev = bh->b_bdev;
        struct blk_dax_ctl dax = {
                .sector = to_sector(bh, mapping->host),
                .size = bh->b_size,
        };
        void *ret;
        void *entry = *entryp;

        if (dax_map_atomic(bdev, &dax) < 0)
                return PTR_ERR(dax.addr);
        dax_unmap_atomic(bdev, &dax);

        ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector);
        if (IS_ERR(ret))
                return PTR_ERR(ret);
        *entryp = ret;

        return vm_insert_mixed(vma, vaddr, dax.pfn);
}

/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files. __dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                        get_block_t get_block)
{
        struct file *file = vma->vm_file;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        void *entry;
        struct buffer_head bh;
        unsigned long vaddr = (unsigned long)vmf->virtual_address;
        unsigned blkbits = inode->i_blkbits;
        sector_t block;
        pgoff_t size;
        int error;
        int major = 0;

        /*
         * Check whether the offset is beyond the end of the file now.  The
         * caller is supposed to hold locks serializing us with truncate /
         * punch hole, so this is a reliable test.
         */
        size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (vmf->pgoff >= size)
                return VM_FAULT_SIGBUS;

        memset(&bh, 0, sizeof(bh));
        block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
        bh.b_bdev = inode->i_sb->s_bdev;
        bh.b_size = PAGE_SIZE;

        entry = grab_mapping_entry(mapping, vmf->pgoff);
        if (IS_ERR(entry)) {
                error = PTR_ERR(entry);
                goto out;
        }

        error = get_block(inode, block, &bh, 0);
        if (!error && (bh.b_size < PAGE_SIZE))
                error = -EIO;           /* fs corruption? */
        if (error)
                goto unlock_entry;

        if (vmf->cow_page) {
                struct page *new_page = vmf->cow_page;
                if (buffer_written(&bh))
                        error = copy_user_bh(new_page, inode, &bh, vaddr);
                else
                        clear_user_highpage(new_page, vaddr);
                if (error)
                        goto unlock_entry;
                if (!radix_tree_exceptional_entry(entry)) {
                        vmf->page = entry;
                        return VM_FAULT_LOCKED;
                }
                vmf->entry = entry;
                return VM_FAULT_DAX_LOCKED;
        }

        if (!buffer_mapped(&bh)) {
                if (vmf->flags & FAULT_FLAG_WRITE) {
                        error = get_block(inode, block, &bh, 1);
                        count_vm_event(PGMAJFAULT);
                        mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
                        major = VM_FAULT_MAJOR;
                        if (!error && (bh.b_size < PAGE_SIZE))
                                error = -EIO;
                        if (error)
                                goto unlock_entry;
                } else {
                        return dax_load_hole(mapping, entry, vmf);
                }
        }

        /* Filesystem should not return unwritten buffers to us! */
        WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
        error = dax_insert_mapping(mapping, &bh, &entry, vma, vmf);
 unlock_entry:
        put_locked_mapping_entry(mapping, vmf->pgoff, entry);
 out:
        if (error == -ENOMEM)
                return VM_FAULT_OOM | major;
        /* -EBUSY is fine, somebody else faulted on the same PTE */
        if ((error < 0) && (error != -EBUSY))
                return VM_FAULT_SIGBUS | major;
        return VM_FAULT_NOPAGE | major;
}
EXPORT_SYMBOL(__dax_fault);

/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
              get_block_t get_block)
{
        int result;
        struct super_block *sb = file_inode(vma->vm_file)->i_sb;

        if (vmf->flags & FAULT_FLAG_WRITE) {
                sb_start_pagefault(sb);
                file_update_time(vma->vm_file);
        }
        result = __dax_fault(vma, vmf, get_block);
        if (vmf->flags & FAULT_FLAG_WRITE)
                sb_end_pagefault(sb);

        return result;
}
EXPORT_SYMBOL_GPL(dax_fault);

#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * The 'colour' (i.e. low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below function.
 */
#define PG_PMD_COLOUR   ((PMD_SIZE >> PAGE_SHIFT) - 1)

static void __dax_dbg(struct buffer_head *bh, unsigned long address,
                const char *reason, const char *fn)
{
        if (bh) {
                char bname[BDEVNAME_SIZE];
                bdevname(bh->b_bdev, bname);
                pr_debug("%s: %s addr: %lx dev %s state %lx start %lld "
                        "length %zd fallback: %s\n", fn, current->comm,
                        address, bname, bh->b_state, (u64)bh->b_blocknr,
                        bh->b_size, reason);
        } else {
                pr_debug("%s: %s addr: %lx fallback: %s\n", fn,
                        current->comm, address, reason);
        }
}

#define dax_pmd_dbg(bh, address, reason)        __dax_dbg(bh, address, reason, "dax_pmd")

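/**
 * __dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @address: The faulting address
 * @pmd: The PMD to install a huge page mapping in
 * @flags: The fault flags (FAULT_FLAG_WRITE and others)
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a PMD-sized page fault occurs, filesystems may call this helper in
 * their pmd_fault handler for DAX files.  Like __dax_fault(), it assumes
 * the caller has done the necessary locking.  Returns VM_FAULT_FALLBACK
 * whenever a huge mapping is not possible.
 */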
int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
                pmd_t *pmd, unsigned int flags, get_block_t get_block)
{
        struct file *file = vma->vm_file;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        struct buffer_head bh;
        unsigned blkbits = inode->i_blkbits;
        unsigned long pmd_addr = address & PMD_MASK;
        bool write = flags & FAULT_FLAG_WRITE;
        struct block_device *bdev;
        pgoff_t size, pgoff;
        sector_t block;
        int result = 0;
        bool alloc = false;

        /* dax pmd mappings require pfn_t_devmap() */
        if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
                return VM_FAULT_FALLBACK;

        /* Fall back to PTEs if we're going to COW */
        if (write && !(vma->vm_flags & VM_SHARED)) {
                split_huge_pmd(vma, pmd, address);
                dax_pmd_dbg(NULL, address, "cow write");
                return VM_FAULT_FALLBACK;
        }
        /* If the PMD would extend outside the VMA */
        if (pmd_addr < vma->vm_start) {
                dax_pmd_dbg(NULL, address, "vma start unaligned");
                return VM_FAULT_FALLBACK;
        }
        if ((pmd_addr + PMD_SIZE) > vma->vm_end) {
                dax_pmd_dbg(NULL, address, "vma end unaligned");
                return VM_FAULT_FALLBACK;
        }

        pgoff = linear_page_index(vma, pmd_addr);
        size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (pgoff >= size)
                return VM_FAULT_SIGBUS;
        /* If the PMD would cover blocks out of the file */
        if ((pgoff | PG_PMD_COLOUR) >= size) {
                dax_pmd_dbg(NULL, address,
                                "offset + huge page size > file size");
                return VM_FAULT_FALLBACK;
        }

        memset(&bh, 0, sizeof(bh));
        bh.b_bdev = inode->i_sb->s_bdev;
        block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

        bh.b_size = PMD_SIZE;

        if (get_block(inode, block, &bh, 0) != 0)
                return VM_FAULT_SIGBUS;

        if (!buffer_mapped(&bh) && write) {
                if (get_block(inode, block, &bh, 1) != 0)
                        return VM_FAULT_SIGBUS;
                alloc = true;
                WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
        }

        bdev = bh.b_bdev;

        /*
         * If the filesystem isn't willing to tell us the length of a hole,
         * just fall back to PTEs.  Calling get_block 512 times in a loop
         * would be silly.
         */
        if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) {
                dax_pmd_dbg(&bh, address, "allocated block too small");
                return VM_FAULT_FALLBACK;
        }

        /*
         * If we allocated new storage, make sure no process has any
         * zero pages covering this hole
         */
        if (alloc) {
                loff_t lstart = pgoff << PAGE_SHIFT;
                loff_t lend = lstart + PMD_SIZE - 1; /* inclusive */

                truncate_pagecache_range(inode, lstart, lend);
        }

        if (!write && !buffer_mapped(&bh)) {
                spinlock_t *ptl;
                pmd_t entry;
                struct page *zero_page = get_huge_zero_page();

                if (unlikely(!zero_page)) {
                        dax_pmd_dbg(&bh, address, "no zero page");
                        goto fallback;
                }

                ptl = pmd_lock(vma->vm_mm, pmd);
                if (!pmd_none(*pmd)) {
                        spin_unlock(ptl);
                        dax_pmd_dbg(&bh, address, "pmd already present");
                        goto fallback;
                }

                dev_dbg(part_to_dev(bdev->bd_part),
                                "%s: %s addr: %lx pfn: <zero> sect: %llx\n",
                                __func__, current->comm, address,
                                (unsigned long long) to_sector(&bh, inode));

                entry = mk_pmd(zero_page, vma->vm_page_prot);
                entry = pmd_mkhuge(entry);
                set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
                result = VM_FAULT_NOPAGE;
                spin_unlock(ptl);
        } else {
                struct blk_dax_ctl dax = {
                        .sector = to_sector(&bh, inode),
                        .size = PMD_SIZE,
                };
                long length = dax_map_atomic(bdev, &dax);

                if (length < 0) {
                        dax_pmd_dbg(&bh, address, "dax-error fallback");
                        goto fallback;
                }
                if (length < PMD_SIZE) {
                        dax_pmd_dbg(&bh, address, "dax-length too small");
                        dax_unmap_atomic(bdev, &dax);
                        goto fallback;
                }
                if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR) {
                        dax_pmd_dbg(&bh, address, "pfn unaligned");
                        dax_unmap_atomic(bdev, &dax);
                        goto fallback;
                }

                if (!pfn_t_devmap(dax.pfn)) {
                        dax_unmap_atomic(bdev, &dax);
                        dax_pmd_dbg(&bh, address, "pfn not in memmap");
                        goto fallback;
                }
                dax_unmap_atomic(bdev, &dax);

                /*
                 * For PTE faults we insert a radix tree entry for reads, and
                 * leave it clean.  Then on the first write we dirty the radix
                 * tree entry via the dax_pfn_mkwrite() path.  This sequence
                 * allows the dax_pfn_mkwrite() call to be simpler and avoid a
                 * call into get_block() to translate the pgoff to a sector in
                 * order to be able to create a new radix tree entry.
                 *
                 * The PMD path doesn't have an equivalent to
                 * dax_pfn_mkwrite(), though, so for a read followed by a
                 * write we traverse all the way through __dax_pmd_fault()
                 * twice.  This means we can just skip inserting a radix tree
                 * entry completely on the initial read and just wait until
                 * the write to insert a dirty entry.
                 */
                if (write) {
                        /*
                         * We should insert radix-tree entry and dirty it here.
                         * For now this is broken...
                         */
                }

                dev_dbg(part_to_dev(bdev->bd_part),
                                "%s: %s addr: %lx pfn: %lx sect: %llx\n",
                                __func__, current->comm, address,
                                pfn_t_to_pfn(dax.pfn),
                                (unsigned long long) dax.sector);
                result |= vmf_insert_pfn_pmd(vma, address, pmd,
                                dax.pfn, write);
        }

 out:
        return result;

 fallback:
        count_vm_event(THP_FAULT_FALLBACK);
        result = VM_FAULT_FALLBACK;
        goto out;
}
EXPORT_SYMBOL_GPL(__dax_pmd_fault);

/**
 * dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @address: The faulting address
 * @pmd: The PMD to install a huge page mapping in
 * @flags: The fault flags (FAULT_FLAG_WRITE and others)
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * pmd_fault handler for DAX files.
 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
                        pmd_t *pmd, unsigned int flags, get_block_t get_block)
{
        int result;
        struct super_block *sb = file_inode(vma->vm_file)->i_sb;

        if (flags & FAULT_FLAG_WRITE) {
                sb_start_pagefault(sb);
                file_update_time(vma->vm_file);
        }
        result = __dax_pmd_fault(vma, address, pmd, flags, get_block);
        if (flags & FAULT_FLAG_WRITE)
                sb_end_pagefault(sb);

        return result;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct address_space *mapping = file->f_mapping;
        void *entry;
        pgoff_t index = vmf->pgoff;

        spin_lock_irq(&mapping->tree_lock);
        entry = get_unlocked_mapping_entry(mapping, index, NULL);
        if (!entry || !radix_tree_exceptional_entry(entry))
                goto out;
        radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
        put_unlocked_mapping_entry(mapping, index, entry);
out:
        spin_unlock_irq(&mapping->tree_lock);
        return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);

static bool dax_range_is_aligned(struct block_device *bdev,
                                 unsigned int offset, unsigned int length)
{
        unsigned short sector_size = bdev_logical_block_size(bdev);

        if (!IS_ALIGNED(offset, sector_size))
                return false;
        if (!IS_ALIGNED(length, sector_size))
                return false;

        return true;
}

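/*
 * Zero a sub-page range.  If both @offset and @length are aligned to the
 * device's logical block size, punch the range via blkdev_issue_zeroout();
 * otherwise map the page for direct access and clear it in place with
 * clear_pmem().
 */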
int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
                unsigned int offset, unsigned int length)
{
        struct blk_dax_ctl dax = {
                .sector         = sector,
                .size           = PAGE_SIZE,
        };

        if (dax_range_is_aligned(bdev, offset, length)) {
                sector_t start_sector = dax.sector + (offset >> 9);

                return blkdev_issue_zeroout(bdev, start_sector,
                                length >> 9, GFP_NOFS, true);
        } else {
                if (dax_map_atomic(bdev, &dax) < 0)
                        return PTR_ERR(dax.addr);
                clear_pmem(dax.addr + offset, length);
                dax_unmap_atomic(bdev, &dax);
        }
        return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
                                                        get_block_t get_block)
{
        struct buffer_head bh;
        pgoff_t index = from >> PAGE_SHIFT;
        unsigned offset = from & (PAGE_SIZE-1);
        int err;

        /* Block boundary? Nothing to do */
        if (!length)
                return 0;
        BUG_ON((offset + length) > PAGE_SIZE);

        memset(&bh, 0, sizeof(bh));
        bh.b_bdev = inode->i_sb->s_bdev;
        bh.b_size = PAGE_SIZE;
        err = get_block(inode, index, &bh, 0);
        if (err < 0 || !buffer_written(&bh))
                return err;

        return __dax_zero_page_range(bh.b_bdev, to_sector(&bh, inode),
                        offset, length);
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);

/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
        unsigned length = PAGE_ALIGN(from) - from;
        return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);