Merge branch 'chandan/prep-subpage-blocksize' into for-chris-4.6
[cascardo/linux.git] fs/btrfs/file.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
#include <linux/uio.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"

static struct kmem_cache *btrfs_inode_defrag_cachep;
/*
 * When auto defrag is enabled, we queue up these defrag structs to
 * remember which inodes need defragging passes.
 */
struct inode_defrag {
        struct rb_node rb_node;
        /* objectid */
        u64 ino;
        /*
         * transid where the defrag was added, we search for
         * extents newer than this
         */
        u64 transid;

        /* root objectid */
        u64 root;

        /* last offset we were able to defrag */
        u64 last_offset;

        /* if we've wrapped around back to zero once already */
        int cycled;
};

static int __compare_inode_defrag(struct inode_defrag *defrag1,
                                  struct inode_defrag *defrag2)
{
        if (defrag1->root > defrag2->root)
                return 1;
        else if (defrag1->root < defrag2->root)
                return -1;
        else if (defrag1->ino > defrag2->ino)
                return 1;
        else if (defrag1->ino < defrag2->ino)
                return -1;
        else
                return 0;
}
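
/*
 * For illustration: records are ordered by root first, then by inode
 * number, so {root=5, ino=300} sorts before {root=7, ino=100}.  The
 * defrag_inodes rb-tree is therefore grouped by subvolume, which is what
 * lets btrfs_run_defrag_inodes() below walk it with a simple
 * (root_objectid, first_ino) cursor.
 */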

/*
 * Insert a record for an inode into the defrag tree.  The lock
 * must be held already.
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered.
 *
 * If an existing record is found, the defrag item you
 * pass in is freed.
 */
static int __btrfs_add_inode_defrag(struct inode *inode,
                                    struct inode_defrag *defrag)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct inode_defrag *entry;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        int ret;

        p = &root->fs_info->defrag_inodes.rb_node;
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);

                ret = __compare_inode_defrag(defrag, entry);
                if (ret < 0)
                        p = &parent->rb_left;
                else if (ret > 0)
                        p = &parent->rb_right;
                else {
                        /*
                         * If we're reinserting an entry for
                         * an old defrag run, make sure to
                         * lower the transid of our existing record.
                         */
                        if (defrag->transid < entry->transid)
                                entry->transid = defrag->transid;
                        if (defrag->last_offset > entry->last_offset)
                                entry->last_offset = defrag->last_offset;
                        return -EEXIST;
                }
        }
        set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
        rb_link_node(&defrag->rb_node, parent, p);
        rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
        return 0;
}

static inline int __need_auto_defrag(struct btrfs_root *root)
{
        if (!btrfs_test_opt(root, AUTO_DEFRAG))
                return 0;

        if (btrfs_fs_closing(root->fs_info))
                return 0;

        return 1;
}

/*
 * Insert a defrag record for this inode if auto defrag is
 * enabled.
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
                           struct inode *inode)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct inode_defrag *defrag;
        u64 transid;
        int ret;

        if (!__need_auto_defrag(root))
                return 0;

        if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
                return 0;

        if (trans)
                transid = trans->transid;
        else
                transid = BTRFS_I(inode)->root->last_trans;

        defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
        if (!defrag)
                return -ENOMEM;

        defrag->ino = btrfs_ino(inode);
        defrag->transid = transid;
        defrag->root = root->root_key.objectid;

        spin_lock(&root->fs_info->defrag_inodes_lock);
        if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) {
                /*
                 * If we set the IN_DEFRAG flag and the inode is then evicted
                 * from memory, re-reading it creates a new in-memory inode
                 * without the IN_DEFRAG flag set.  In that case we may find
                 * an existing defrag record in the tree.
                 */
                ret = __btrfs_add_inode_defrag(inode, defrag);
                if (ret)
                        kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        } else {
                kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        }
        spin_unlock(&root->fs_info->defrag_inodes_lock);
        return 0;
}

/*
 * Requeue the defrag object.  If there is a defrag object that points to
 * the same inode in the tree, we will merge them together (by
 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
 */
static void btrfs_requeue_inode_defrag(struct inode *inode,
                                       struct inode_defrag *defrag)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret;

        if (!__need_auto_defrag(root))
                goto out;

        /*
         * Here we don't check the IN_DEFRAG flag, because we need to merge
         * the records together.
         */
        spin_lock(&root->fs_info->defrag_inodes_lock);
        ret = __btrfs_add_inode_defrag(inode, defrag);
        spin_unlock(&root->fs_info->defrag_inodes_lock);
        if (ret)
                goto out;
        return;
out:
        kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}

/*
 * Pick the defraggable inode that we want; if it doesn't exist, we will
 * get the next one.
 */
static struct inode_defrag *
btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
        struct inode_defrag *entry = NULL;
        struct inode_defrag tmp;
        struct rb_node *p;
        struct rb_node *parent = NULL;
        int ret;

        tmp.ino = ino;
        tmp.root = root;

        spin_lock(&fs_info->defrag_inodes_lock);
        p = fs_info->defrag_inodes.rb_node;
        while (p) {
                parent = p;
                entry = rb_entry(parent, struct inode_defrag, rb_node);

                ret = __compare_inode_defrag(&tmp, entry);
                if (ret < 0)
                        p = parent->rb_left;
                else if (ret > 0)
                        p = parent->rb_right;
                else
                        goto out;
        }

        if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
                parent = rb_next(parent);
                if (parent)
                        entry = rb_entry(parent, struct inode_defrag, rb_node);
                else
                        entry = NULL;
        }
out:
        if (entry)
                rb_erase(parent, &fs_info->defrag_inodes);
        spin_unlock(&fs_info->defrag_inodes_lock);
        return entry;
}

void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
        struct inode_defrag *defrag;
        struct rb_node *node;

        spin_lock(&fs_info->defrag_inodes_lock);
        node = rb_first(&fs_info->defrag_inodes);
        while (node) {
                rb_erase(node, &fs_info->defrag_inodes);
                defrag = rb_entry(node, struct inode_defrag, rb_node);
                kmem_cache_free(btrfs_inode_defrag_cachep, defrag);

                cond_resched_lock(&fs_info->defrag_inodes_lock);

                node = rb_first(&fs_info->defrag_inodes);
        }
        spin_unlock(&fs_info->defrag_inodes_lock);
}

#define BTRFS_DEFRAG_BATCH      1024

static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
                                    struct inode_defrag *defrag)
{
        struct btrfs_root *inode_root;
        struct inode *inode;
        struct btrfs_key key;
        struct btrfs_ioctl_defrag_range_args range;
        int num_defrag;
        int index;
        int ret;

        /* get the inode */
        key.objectid = defrag->root;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = (u64)-1;

        index = srcu_read_lock(&fs_info->subvol_srcu);

        inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
        if (IS_ERR(inode_root)) {
                ret = PTR_ERR(inode_root);
                goto cleanup;
        }

        key.objectid = defrag->ino;
        key.type = BTRFS_INODE_ITEM_KEY;
        key.offset = 0;
        inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
        if (IS_ERR(inode)) {
                ret = PTR_ERR(inode);
                goto cleanup;
        }
        srcu_read_unlock(&fs_info->subvol_srcu, index);

        /* do a chunk of defrag */
        clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
        memset(&range, 0, sizeof(range));
        range.len = (u64)-1;
        range.start = defrag->last_offset;

        sb_start_write(fs_info->sb);
        num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
                                       BTRFS_DEFRAG_BATCH);
        sb_end_write(fs_info->sb);
        /*
         * if we filled the whole defrag batch, there
         * must be more work to do.  Queue this defrag
         * again
         */
        if (num_defrag == BTRFS_DEFRAG_BATCH) {
                defrag->last_offset = range.start;
                btrfs_requeue_inode_defrag(inode, defrag);
        } else if (defrag->last_offset && !defrag->cycled) {
                /*
                 * we didn't fill our defrag batch, but
                 * we didn't start at zero.  Make sure we loop
                 * around to the start of the file.
                 */
                defrag->last_offset = 0;
                defrag->cycled = 1;
                btrfs_requeue_inode_defrag(inode, defrag);
        } else {
                kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        }

        iput(inode);
        return 0;
cleanup:
        srcu_read_unlock(&fs_info->subvol_srcu, index);
        kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        return ret;
}

/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
        struct inode_defrag *defrag;
        u64 first_ino = 0;
        u64 root_objectid = 0;

        atomic_inc(&fs_info->defrag_running);
        while (1) {
                /* Pause the auto defragger. */
                if (test_bit(BTRFS_FS_STATE_REMOUNTING,
                             &fs_info->fs_state))
                        break;

                if (!__need_auto_defrag(fs_info->tree_root))
                        break;

                /* find an inode to defrag */
                defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
                                                 first_ino);
                if (!defrag) {
                        if (root_objectid || first_ino) {
                                root_objectid = 0;
                                first_ino = 0;
                                continue;
                        } else {
                                break;
                        }
                }

                first_ino = defrag->ino + 1;
                root_objectid = defrag->root;

                __btrfs_run_defrag_inode(fs_info, defrag);
        }
        atomic_dec(&fs_info->defrag_running);

        /*
         * during unmount, we use the transaction_wait queue to
         * wait for the defragger to stop
         */
        wake_up(&fs_info->transaction_wait);
        return 0;
}
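
/*
 * A note on the loop above: the (root_objectid, first_ino) pair acts as a
 * cursor into the defrag_inodes tree.  Bumping first_ino to defrag->ino + 1
 * makes the next btrfs_pick_defrag_inode() call return the next record in
 * (root, ino) order, and once the cursor runs off the end the reset to
 * (0, 0) gives records queued in the meantime one more pass before the
 * loop finally breaks.
 */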

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
                                         struct page **prepared_pages,
                                         struct iov_iter *i)
{
        size_t copied = 0;
        size_t total_copied = 0;
        int pg = 0;
        int offset = pos & (PAGE_CACHE_SIZE - 1);

        while (write_bytes > 0) {
                size_t count = min_t(size_t,
                                     PAGE_CACHE_SIZE - offset, write_bytes);
                struct page *page = prepared_pages[pg];
                /*
                 * Copy data from userspace to the current page
                 */
                copied = iov_iter_copy_from_user_atomic(page, i, offset, count);

                /* Flush processor's dcache for this page */
                flush_dcache_page(page);

                /*
                 * if we get a partial write, we can end up with
                 * partially up to date pages.  These add
                 * a lot of complexity, so make sure they don't
                 * happen by forcing this copy to be retried.
                 *
                 * The rest of the btrfs_file_write code will fall
                 * back to page at a time copies after we return 0.
                 */
                if (!PageUptodate(page) && copied < count)
                        copied = 0;

                iov_iter_advance(i, copied);
                write_bytes -= copied;
                total_copied += copied;

                /* Return to btrfs_file_write_iter to fault page */
                if (unlikely(copied == 0))
                        break;

                if (copied < PAGE_CACHE_SIZE - offset) {
                        offset += copied;
                } else {
                        pg++;
                        offset = 0;
                }
        }
        return total_copied;
}
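
/*
 * Worked example (assuming 4K pages): for pos = 5000, offset starts at
 * 5000 & 4095 = 904, so the first iteration copies at most 4096 - 904 =
 * 3192 bytes into prepared_pages[0].  A full copy resets offset to 0 and
 * advances pg; a short copy that leaves the page uptodate just advances
 * offset within the same page.
 */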

/*
 * unlocks pages after btrfs_file_write is done with them
 */
static void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
        size_t i;
        for (i = 0; i < num_pages; i++) {
                /*
                 * PageChecked is some magic around finding pages that have
                 * been modified without going through btrfs_set_page_dirty;
                 * clear it here.  There should be no need to mark the pages
                 * accessed, as prepare_pages should have already marked them
                 * accessed via find_or_create_page().
                 */
                ClearPageChecked(pages[i]);
                unlock_page(pages[i]);
                page_cache_release(pages[i]);
        }
}

/*
 * After copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * This also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
                             struct page **pages, size_t num_pages,
                             loff_t pos, size_t write_bytes,
                             struct extent_state **cached)
{
        int err = 0;
        int i;
        u64 num_bytes;
        u64 start_pos;
        u64 end_of_last_block;
        u64 end_pos = pos + write_bytes;
        loff_t isize = i_size_read(inode);

        start_pos = pos & ~((u64)root->sectorsize - 1);
        num_bytes = round_up(write_bytes + pos - start_pos, root->sectorsize);

        end_of_last_block = start_pos + num_bytes - 1;
        err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
                                        cached);
        if (err)
                return err;

        for (i = 0; i < num_pages; i++) {
                struct page *p = pages[i];
                SetPageUptodate(p);
                ClearPageChecked(p);
                set_page_dirty(p);
        }

        /*
         * We've only changed i_size in RAM, and we haven't updated
         * the disk i_size.  There is no need to log the inode
         * at this time.
         */
        if (end_pos > isize)
                i_size_write(inode, end_pos);
        return 0;
}
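
/*
 * Worked example of the rounding above, assuming a 4K sectorsize: for
 * pos = 6144 and write_bytes = 2048, start_pos = 6144 & ~4095 = 4096,
 * num_bytes = round_up(2048 + 6144 - 4096, 4096) = 4096, and
 * end_of_last_block = 8191, i.e. the delalloc range is widened to cover
 * every whole sector touched by the write.
 */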

/*
 * This drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
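/*
 * For example, dropping the range [4096, 8191] from a cached mapping that
 * covers [0, 12287] removes the original extent_map and leaves two pieces
 * behind: a front split for [0, 4095] and a tail split for [8192, 12287],
 * inserted via replace_extent_mapping()/add_extent_mapping() below.
 */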
void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
                             int skip_pinned)
{
        struct extent_map *em;
        struct extent_map *split = NULL;
        struct extent_map *split2 = NULL;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        u64 len = end - start + 1;
        u64 gen;
        int ret;
        int testend = 1;
        unsigned long flags;
        int compressed = 0;
        bool modified;

        WARN_ON(end < start);
        if (end == (u64)-1) {
                len = (u64)-1;
                testend = 0;
        }
        while (1) {
                int no_splits = 0;

                modified = false;
                if (!split)
                        split = alloc_extent_map();
                if (!split2)
                        split2 = alloc_extent_map();
                if (!split || !split2)
                        no_splits = 1;

                write_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, start, len);
                if (!em) {
                        write_unlock(&em_tree->lock);
                        break;
                }
                flags = em->flags;
                gen = em->generation;
                if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
                        if (testend && em->start + em->len >= start + len) {
                                free_extent_map(em);
                                write_unlock(&em_tree->lock);
                                break;
                        }
                        start = em->start + em->len;
                        if (testend)
                                len = start + len - (em->start + em->len);
                        free_extent_map(em);
                        write_unlock(&em_tree->lock);
                        continue;
                }
                compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                clear_bit(EXTENT_FLAG_PINNED, &em->flags);
                clear_bit(EXTENT_FLAG_LOGGING, &flags);
                modified = !list_empty(&em->list);
                if (no_splits)
                        goto next;

                if (em->start < start) {
                        split->start = em->start;
                        split->len = start - em->start;

                        if (em->block_start < EXTENT_MAP_LAST_BYTE) {
                                split->orig_start = em->orig_start;
                                split->block_start = em->block_start;

                                if (compressed)
                                        split->block_len = em->block_len;
                                else
                                        split->block_len = split->len;
                                split->orig_block_len = max(split->block_len,
                                                em->orig_block_len);
                                split->ram_bytes = em->ram_bytes;
                        } else {
                                split->orig_start = split->start;
                                split->block_len = 0;
                                split->block_start = em->block_start;
                                split->orig_block_len = 0;
                                split->ram_bytes = split->len;
                        }

                        split->generation = gen;
                        split->bdev = em->bdev;
                        split->flags = flags;
                        split->compress_type = em->compress_type;
                        replace_extent_mapping(em_tree, em, split, modified);
                        free_extent_map(split);
                        split = split2;
                        split2 = NULL;
                }
                if (testend && em->start + em->len > start + len) {
                        u64 diff = start + len - em->start;

                        split->start = start + len;
                        split->len = em->start + em->len - (start + len);
                        split->bdev = em->bdev;
                        split->flags = flags;
                        split->compress_type = em->compress_type;
                        split->generation = gen;

                        if (em->block_start < EXTENT_MAP_LAST_BYTE) {
                                split->orig_block_len = max(em->block_len,
                                                    em->orig_block_len);

                                split->ram_bytes = em->ram_bytes;
                                if (compressed) {
                                        split->block_len = em->block_len;
                                        split->block_start = em->block_start;
                                        split->orig_start = em->orig_start;
                                } else {
                                        split->block_len = split->len;
                                        split->block_start = em->block_start
                                                + diff;
                                        split->orig_start = em->orig_start;
                                }
                        } else {
                                split->ram_bytes = split->len;
                                split->orig_start = split->start;
                                split->block_len = 0;
                                split->block_start = em->block_start;
                                split->orig_block_len = 0;
                        }

                        if (extent_map_in_tree(em)) {
                                replace_extent_mapping(em_tree, em, split,
                                                       modified);
                        } else {
                                ret = add_extent_mapping(em_tree, split,
                                                         modified);
                                ASSERT(ret == 0); /* Logic error */
                        }
                        free_extent_map(split);
                        split = NULL;
                }
next:
                if (extent_map_in_tree(em))
                        remove_extent_mapping(em_tree, em);
                write_unlock(&em_tree->lock);

                /* once for us */
                free_extent_map(em);
                /* once for the tree */
                free_extent_map(em);
        }
        if (split)
                free_extent_map(split);
        if (split2)
                free_extent_map(split2);
}

/*
 * This is very complex, but the basic idea is to drop all extents
 * in the range start - end.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
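/*
 * The loop below handles the four ways an extent item can overlap the
 * drop range: the range punches a hole in the middle of the extent (the
 * item is duplicated and both copies trimmed), the range covers the front
 * (the item's key offset and extent offset are moved up to end), the range
 * covers the tail (num_bytes is trimmed down to start), or the range
 * swallows the extent entirely (the item is queued for deletion).
 */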
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root, struct inode *inode,
                         struct btrfs_path *path, u64 start, u64 end,
                         u64 *drop_end, int drop_cache,
                         int replace_extent,
                         u32 extent_item_size,
                         int *key_inserted)
{
        struct extent_buffer *leaf;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 ino = btrfs_ino(inode);
        u64 search_start = start;
        u64 disk_bytenr = 0;
        u64 num_bytes = 0;
        u64 extent_offset = 0;
        u64 extent_end = 0;
        int del_nr = 0;
        int del_slot = 0;
        int extent_type;
        int recow;
        int ret;
        int modify_tree = -1;
        int update_refs;
        int found = 0;
        int leafs_visited = 0;

        if (drop_cache)
                btrfs_drop_extent_cache(inode, start, end - 1, 0);

        if (start >= BTRFS_I(inode)->disk_i_size && !replace_extent)
                modify_tree = 0;

        update_refs = (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
                       root == root->fs_info->tree_root);
        while (1) {
                recow = 0;
                ret = btrfs_lookup_file_extent(trans, root, path, ino,
                                               search_start, modify_tree);
                if (ret < 0)
                        break;
                if (ret > 0 && path->slots[0] > 0 && search_start == start) {
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
                        if (key.objectid == ino &&
                            key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }
                ret = 0;
                leafs_visited++;
next_slot:
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        BUG_ON(del_nr > 0);
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                break;
                        if (ret > 0) {
                                ret = 0;
                                break;
                        }
                        leafs_visited++;
                        leaf = path->nodes[0];
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

                if (key.objectid > ino)
                        break;
                if (WARN_ON_ONCE(key.objectid < ino) ||
                    key.type < BTRFS_EXTENT_DATA_KEY) {
                        ASSERT(del_nr == 0);
                        path->slots[0]++;
                        goto next_slot;
                }
                if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
                        break;

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);
                extent_type = btrfs_file_extent_type(leaf, fi);

                if (extent_type == BTRFS_FILE_EXTENT_REG ||
                    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
                        disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
                        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
                        extent_offset = btrfs_file_extent_offset(leaf, fi);
                        extent_end = key.offset +
                                btrfs_file_extent_num_bytes(leaf, fi);
                } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                        extent_end = key.offset +
                                btrfs_file_extent_inline_len(leaf,
                                                     path->slots[0], fi);
                } else {
                        /* can't happen */
                        BUG();
                }

                /*
                 * Don't skip extent items representing 0 byte lengths.  They
                 * used to be created (due to a bug) when we hit an -ENOSPC
                 * condition while punching holes.  So if we find one here,
                 * just ensure we delete it, otherwise we would insert a new
                 * file extent item with the same key (offset) as that 0 bytes
                 * length file extent item in the call to
                 * setup_items_for_insert() later in this function.
                 */
                if (extent_end == key.offset && extent_end >= search_start)
                        goto delete_extent_item;

                if (extent_end <= search_start) {
                        path->slots[0]++;
                        goto next_slot;
                }

                found = 1;
                search_start = max(key.offset, start);
                if (recow || !modify_tree) {
                        modify_tree = -1;
                        btrfs_release_path(path);
                        continue;
                }

                /*
                 *     | - range to drop - |
                 *  | -------- extent -------- |
                 */
                if (start > key.offset && end < extent_end) {
                        BUG_ON(del_nr > 0);
                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                ret = -EOPNOTSUPP;
                                break;
                        }

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = start;
                        ret = btrfs_duplicate_item(trans, root, path,
                                                   &new_key);
                        if (ret == -EAGAIN) {
                                btrfs_release_path(path);
                                continue;
                        }
                        if (ret < 0)
                                break;

                        leaf = path->nodes[0];
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);

                        extent_offset += start - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - start);
                        btrfs_mark_buffer_dirty(leaf);

                        if (update_refs && disk_bytenr > 0) {
                                ret = btrfs_inc_extent_ref(trans, root,
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                new_key.objectid,
                                                start - extent_offset);
                                BUG_ON(ret); /* -ENOMEM */
                        }
                        key.offset = start;
                }
                /*
                 *  | ---- range to drop ----- |
                 *      | -------- extent -------- |
                 */
                if (start <= key.offset && end < extent_end) {
                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                ret = -EOPNOTSUPP;
                                break;
                        }

                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = end;
                        btrfs_set_item_key_safe(root->fs_info, path, &new_key);

                        extent_offset += end - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_mark_buffer_dirty(leaf);
                        if (update_refs && disk_bytenr > 0)
                                inode_sub_bytes(inode, end - key.offset);
                        break;
                }

                search_start = extent_end;
                /*
                 *       | ---- range to drop ----- |
                 *  | -------- extent -------- |
                 */
                if (start > key.offset && end >= extent_end) {
                        BUG_ON(del_nr > 0);
                        if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                ret = -EOPNOTSUPP;
                                break;
                        }

                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        btrfs_mark_buffer_dirty(leaf);
                        if (update_refs && disk_bytenr > 0)
                                inode_sub_bytes(inode, extent_end - start);
                        if (end == extent_end)
                                break;

                        path->slots[0]++;
                        goto next_slot;
                }

                /*
                 *  | ---- range to drop ----- |
                 *    | ------ extent ------ |
                 */
                if (start <= key.offset && end >= extent_end) {
delete_extent_item:
                        if (del_nr == 0) {
                                del_slot = path->slots[0];
                                del_nr = 1;
                        } else {
                                BUG_ON(del_slot + del_nr != path->slots[0]);
                                del_nr++;
                        }

                        if (update_refs &&
                            extent_type == BTRFS_FILE_EXTENT_INLINE) {
                                inode_sub_bytes(inode,
                                                extent_end - key.offset);
                                extent_end = ALIGN(extent_end,
                                                   root->sectorsize);
                        } else if (update_refs && disk_bytenr > 0) {
                                ret = btrfs_free_extent(trans, root,
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                key.objectid, key.offset -
                                                extent_offset);
                                BUG_ON(ret); /* -ENOMEM */
                                inode_sub_bytes(inode,
                                                extent_end - key.offset);
                        }

                        if (end == extent_end)
                                break;

                        if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
                                path->slots[0]++;
                                goto next_slot;
                        }

                        ret = btrfs_del_items(trans, root, path, del_slot,
                                              del_nr);
                        if (ret) {
                                btrfs_abort_transaction(trans, root, ret);
                                break;
                        }

                        del_nr = 0;
                        del_slot = 0;

                        btrfs_release_path(path);
                        continue;
                }

                BUG_ON(1);
        }

        if (!ret && del_nr > 0) {
                /*
                 * Set path->slots[0] to the first slot, so that after the
                 * delete, if items are moved off from our leaf to its
                 * immediate left or right neighbor leaves, we end up with a
                 * correct and adjusted path->slots[0] for our insertion
                 * (if replace_extent != 0).
                 */
                path->slots[0] = del_slot;
                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                if (ret)
                        btrfs_abort_transaction(trans, root, ret);
        }

        leaf = path->nodes[0];
        /*
         * If btrfs_del_items() was called, it might have deleted a leaf, in
         * which case it unlocked our path, so check path->locks[0] matches a
         * write lock.
         */
        if (!ret && replace_extent && leafs_visited == 1 &&
            (path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
             path->locks[0] == BTRFS_WRITE_LOCK) &&
            btrfs_leaf_free_space(root, leaf) >=
            sizeof(struct btrfs_item) + extent_item_size) {

                key.objectid = ino;
                key.type = BTRFS_EXTENT_DATA_KEY;
                key.offset = start;
                if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
                        struct btrfs_key slot_key;

                        btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
                        if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
                                path->slots[0]++;
                }
                setup_items_for_insert(root, path, &key,
                                       &extent_item_size,
                                       extent_item_size,
                                       sizeof(struct btrfs_item) +
                                       extent_item_size, 1);
                *key_inserted = 1;
        }

        if (!replace_extent || !(*key_inserted))
                btrfs_release_path(path);
        if (drop_end)
                *drop_end = found ? min(end, extent_end) : end;
        return ret;
}

int btrfs_drop_extents(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct inode *inode, u64 start,
                       u64 end, int drop_cache)
{
        struct btrfs_path *path;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
                                   drop_cache, 0, 0, NULL);
        btrfs_free_path(path);
        return ret;
}

static int extent_mergeable(struct extent_buffer *leaf, int slot,
                            u64 objectid, u64 bytenr, u64 orig_offset,
                            u64 *start, u64 *end)
{
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        u64 extent_end;

        if (slot < 0 || slot >= btrfs_header_nritems(leaf))
                return 0;

        btrfs_item_key_to_cpu(leaf, &key, slot);
        if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
                return 0;

        fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
        if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
            btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
            btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
            btrfs_file_extent_compression(leaf, fi) ||
            btrfs_file_extent_encryption(leaf, fi) ||
            btrfs_file_extent_other_encoding(leaf, fi))
                return 0;

        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        if ((*start && *start != key.offset) || (*end && *end != extent_end))
                return 0;

        *start = key.offset;
        *end = extent_end;
        return 1;
}
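
/*
 * In other words, a neighbor slot is mergeable only if it holds a plain
 * (uncompressed, unencrypted) regular extent item backed by the same
 * physical extent: same disk_bytenr, with a file offset that lines up
 * with the shared orig_offset.  Callers pass the expected boundary in
 * *start or *end (0 means "don't care") and get both boundaries of the
 * neighbor back on success.
 */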

/*
 * Mark the extent in the range start - end as written.
 *
 * This changes the extent type from 'pre-allocated' to 'regular'.  If only
 * part of the extent is marked as written, the extent will be split into
 * two or three.
 */
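/*
 * For example, writing to the middle of a larger preallocated extent
 * [key.offset, extent_end) splits it three ways: a preallocated head
 * [key.offset, start), a regular piece [start, end) covering the written
 * range, and a preallocated tail [end, extent_end).  If the written range
 * borders a mergeable neighbor, the pieces are merged back instead.
 */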
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
                              struct inode *inode, u64 start, u64 end)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_buffer *leaf;
        struct btrfs_path *path;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        struct btrfs_key new_key;
        u64 bytenr;
        u64 num_bytes;
        u64 extent_end;
        u64 orig_offset;
        u64 other_start;
        u64 other_end;
        u64 split;
        int del_nr = 0;
        int del_slot = 0;
        int recow;
        int ret;
        u64 ino = btrfs_ino(inode);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
again:
        recow = 0;
        split = start;
        key.objectid = ino;
        key.type = BTRFS_EXTENT_DATA_KEY;
        key.offset = split;

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0)
                goto out;
        if (ret > 0 && path->slots[0] > 0)
                path->slots[0]--;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
        fi = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        BUG_ON(btrfs_file_extent_type(leaf, fi) !=
               BTRFS_FILE_EXTENT_PREALLOC);
        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        BUG_ON(key.offset > start || extent_end < end);

        bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
        orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
        memcpy(&new_key, &key, sizeof(new_key));

        if (start == key.offset && end < extent_end) {
                other_start = 0;
                other_end = start;
                if (extent_mergeable(leaf, path->slots[0] - 1,
                                     ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        new_key.offset = end;
                        btrfs_set_item_key_safe(root->fs_info, path, &new_key);
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     end - orig_offset);
                        fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        end - other_start);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

        if (start > key.offset && end == extent_end) {
                other_start = end;
                other_end = 0;
                if (extent_mergeable(leaf, path->slots[0] + 1,
                                     ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        start - key.offset);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        path->slots[0]++;
                        new_key.offset = start;
                        btrfs_set_item_key_safe(root->fs_info, path, &new_key);

                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        other_end - start);
                        btrfs_set_file_extent_offset(leaf, fi,
                                                     start - orig_offset);
                        btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }

        while (start > key.offset || end < extent_end) {
                if (key.offset == start)
                        split = end;

                new_key.offset = split;
                ret = btrfs_duplicate_item(trans, root, path, &new_key);
                if (ret == -EAGAIN) {
                        btrfs_release_path(path);
                        goto again;
                }
                if (ret < 0) {
                        btrfs_abort_transaction(trans, root, ret);
                        goto out;
                }

                leaf = path->nodes[0];
                fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
                                    struct btrfs_file_extent_item);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                split - key.offset);

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);

                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - split);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
                                           root->root_key.objectid,
                                           ino, orig_offset);
                BUG_ON(ret); /* -ENOMEM */

                if (split == start) {
                        key.offset = start;
                } else {
                        BUG_ON(start != key.offset);
                        path->slots[0]--;
                        extent_end = end;
                }
                recow = 1;
        }

        other_start = end;
        other_end = 0;
        if (extent_mergeable(leaf, path->slots[0] + 1,
                             ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(path);
                        goto again;
                }
                extent_end = other_end;
                del_slot = path->slots[0] + 1;
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
                                        ino, orig_offset);
                BUG_ON(ret); /* -ENOMEM */
        }
        other_start = 0;
        other_end = start;
        if (extent_mergeable(leaf, path->slots[0] - 1,
                             ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(path);
                        goto again;
                }
                key.offset = other_start;
                del_slot = path->slots[0];
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
                                        ino, orig_offset);
                BUG_ON(ret); /* -ENOMEM */
        }
        if (del_nr == 0) {
                fi = btrfs_item_ptr(leaf, path->slots[0],
                           struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_mark_buffer_dirty(leaf);
        } else {
                fi = btrfs_item_ptr(leaf, del_slot - 1,
                           struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi,
                                           BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_generation(leaf, fi, trans->transid);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - key.offset);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                if (ret < 0) {
                        btrfs_abort_transaction(trans, root, ret);
                        goto out;
                }
        }
out:
        btrfs_free_path(path);
        return 0;
}

/*
 * On error we return an unlocked page and the error value; on success we
 * return a locked page and 0.
 */
static int prepare_uptodate_page(struct inode *inode,
                                 struct page *page, u64 pos,
                                 bool force_uptodate)
{
        int ret = 0;

        if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
            !PageUptodate(page)) {
                ret = btrfs_readpage(NULL, page);
                if (ret)
                        return ret;
                lock_page(page);
                if (!PageUptodate(page)) {
                        unlock_page(page);
                        return -EIO;
                }
                if (page->mapping != inode->i_mapping) {
                        unlock_page(page);
                        return -EAGAIN;
                }
        }
        return 0;
}
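
/*
 * The -EAGAIN case above covers a page that was truncated or otherwise
 * detached from the mapping while we read it: page->mapping no longer
 * matches, so the caller (prepare_pages below) drops its reference and
 * retries with a fresh page.
 */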

/*
 * This just gets pages into the page cache and locks them down.
 */
1320 static noinline int prepare_pages(struct inode *inode, struct page **pages,
1321                                   size_t num_pages, loff_t pos,
1322                                   size_t write_bytes, bool force_uptodate)
1323 {
1324         int i;
1325         unsigned long index = pos >> PAGE_CACHE_SHIFT;
1326         gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
1327         int err = 0;
1328         int faili;
1329
1330         for (i = 0; i < num_pages; i++) {
1331 again:
1332                 pages[i] = find_or_create_page(inode->i_mapping, index + i,
1333                                                mask | __GFP_WRITE);
1334                 if (!pages[i]) {
1335                         faili = i - 1;
1336                         err = -ENOMEM;
1337                         goto fail;
1338                 }
1339
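                /*
                 * Only the first and the last page of the write range can be
                 * partially overwritten, so only those may need to be read in
                 * (made uptodate) first; the pages in between are completely
                 * overwritten by the copy.
                 */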
1340                 if (i == 0)
1341                         err = prepare_uptodate_page(inode, pages[i], pos,
1342                                                     force_uptodate);
1343                 if (!err && i == num_pages - 1)
1344                         err = prepare_uptodate_page(inode, pages[i],
1345                                                     pos + write_bytes, false);
1346                 if (err) {
1347                         page_cache_release(pages[i]);
1348                         if (err == -EAGAIN) {
1349                                 err = 0;
1350                                 goto again;
1351                         }
1352                         faili = i - 1;
1353                         goto fail;
1354                 }
1355                 wait_on_page_writeback(pages[i]);
1356         }
1357
1358         return 0;
1359 fail:
1360         while (faili >= 0) {
1361                 unlock_page(pages[faili]);
1362                 page_cache_release(pages[faili]);
1363                 faili--;
1364         }
1365         return err;
1366
1367 }
1368
1369 /*
1370  * This function locks the extent and properly waits for data=ordered extents
1371  * to finish before allowing the pages to be modified if needed.
1372  *
1373  * The return value:
1374  * 1 - the extent is locked
1375  * 0 - the extent is not locked, and everything is OK
1376  * -EAGAIN - need to re-prepare the pages
1377  * any other negative number - something went wrong
1378  */
1379 static noinline int
1380 lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
1381                                 size_t num_pages, loff_t pos,
1382                                 size_t write_bytes,
1383                                 u64 *lockstart, u64 *lockend,
1384                                 struct extent_state **cached_state)
1385 {
1386         struct btrfs_root *root = BTRFS_I(inode)->root;
1387         u64 start_pos;
1388         u64 last_pos;
1389         int i;
1390         int ret = 0;
1391
1392         start_pos = round_down(pos, root->sectorsize);
1393         last_pos = start_pos
1394                 + round_up(pos + write_bytes - start_pos, root->sectorsize) - 1;
1395
1396         if (start_pos < inode->i_size) {
1397                 struct btrfs_ordered_extent *ordered;
1398                 lock_extent_bits(&BTRFS_I(inode)->io_tree,
1399                                  start_pos, last_pos, cached_state);
1400                 ordered = btrfs_lookup_ordered_range(inode, start_pos,
1401                                                      last_pos - start_pos + 1);
1402                 if (ordered &&
1403                     ordered->file_offset + ordered->len > start_pos &&
1404                     ordered->file_offset <= last_pos) {
1405                         unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1406                                              start_pos, last_pos,
1407                                              cached_state, GFP_NOFS);
1408                         for (i = 0; i < num_pages; i++) {
1409                                 unlock_page(pages[i]);
1410                                 page_cache_release(pages[i]);
1411                         }
1412                         btrfs_start_ordered_extent(inode, ordered, 1);
1413                         btrfs_put_ordered_extent(ordered);
1414                         return -EAGAIN;
1415                 }
1416                 if (ordered)
1417                         btrfs_put_ordered_extent(ordered);
1418
1419                 clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
1420                                   last_pos, EXTENT_DIRTY | EXTENT_DELALLOC |
1421                                   EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
1422                                   0, 0, cached_state, GFP_NOFS);
1423                 *lockstart = start_pos;
1424                 *lockend = last_pos;
1425                 ret = 1;
1426         }
1427
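        /*
         * The pages may carry dirty bits from an earlier pass over this
         * range; clear them so writeback leaves the pages alone while we
         * modify them, and re-account them as redirtied to keep the dirty
         * page accounting balanced.
         */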
1428         for (i = 0; i < num_pages; i++) {
1429                 if (clear_page_dirty_for_io(pages[i]))
1430                         account_page_redirty(pages[i]);
1431                 set_page_extent_mapped(pages[i]);
1432                 WARN_ON(!PageLocked(pages[i]));
1433         }
1434
1435         return ret;
1436 }
1437
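/*
 * Check if the (start of the) range at @pos can be written without COW.
 *
 * Returns > 0 when NOCOW is possible; *write_bytes may be trimmed to fit
 * the existing extent and the caller must call
 * btrfs_end_write_no_snapshoting() once it is done with the write.
 * Returns 0 when the write must COW, and -ENOSPC when snapshot creation
 * currently blocks NOCOW writes.
 */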
1438 static noinline int check_can_nocow(struct inode *inode, loff_t pos,
1439                                     size_t *write_bytes)
1440 {
1441         struct btrfs_root *root = BTRFS_I(inode)->root;
1442         struct btrfs_ordered_extent *ordered;
1443         u64 lockstart, lockend;
1444         u64 num_bytes;
1445         int ret;
1446
1447         ret = btrfs_start_write_no_snapshoting(root);
1448         if (!ret)
1449                 return -ENOSPC;
1450
1451         lockstart = round_down(pos, root->sectorsize);
1452         lockend = round_up(pos + *write_bytes, root->sectorsize) - 1;
1453
1454         while (1) {
1455                 lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
1456                 ordered = btrfs_lookup_ordered_range(inode, lockstart,
1457                                                      lockend - lockstart + 1);
1458                 if (!ordered)
1459                         break;
1461                 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
1462                 btrfs_start_ordered_extent(inode, ordered, 1);
1463                 btrfs_put_ordered_extent(ordered);
1464         }
1465
1466         num_bytes = lockend - lockstart + 1;
1467         ret = can_nocow_extent(inode, lockstart, &num_bytes, NULL, NULL, NULL);
1468         if (ret <= 0) {
1469                 ret = 0;
1470                 btrfs_end_write_no_snapshoting(root);
1471         } else {
1472                 *write_bytes = min_t(size_t, *write_bytes,
1473                                      num_bytes - pos + lockstart);
1474         }
1475
1476         unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
1477
1478         return ret;
1479 }
1480
1481 static noinline ssize_t __btrfs_buffered_write(struct file *file,
1482                                                struct iov_iter *i,
1483                                                loff_t pos)
1484 {
1485         struct inode *inode = file_inode(file);
1486         struct btrfs_root *root = BTRFS_I(inode)->root;
1487         struct page **pages = NULL;
1488         struct extent_state *cached_state = NULL;
1489         u64 release_bytes = 0;
1490         u64 lockstart;
1491         u64 lockend;
1492         size_t num_written = 0;
1493         int nrptrs;
1494         int ret = 0;
1495         bool only_release_metadata = false;
1496         bool force_page_uptodate = false;
1497         bool need_unlock;
1498
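        /*
         * Size the page pointer array to cover the whole iov, but cap it at
         * one page worth of pointers and at the task's remaining dirty page
         * allowance, with a floor of 8 entries.
         */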
1499         nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_CACHE_SIZE),
1500                         PAGE_CACHE_SIZE / (sizeof(struct page *)));
1501         nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
1502         nrptrs = max(nrptrs, 8);
1503         pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
1504         if (!pages)
1505                 return -ENOMEM;
1506
1507         while (iov_iter_count(i) > 0) {
1508                 size_t offset = pos & (PAGE_CACHE_SIZE - 1);
1509                 size_t sector_offset;
1510                 size_t write_bytes = min(iov_iter_count(i),
1511                                          nrptrs * (size_t)PAGE_CACHE_SIZE -
1512                                          offset);
1513                 size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
1514                                                 PAGE_CACHE_SIZE);
1515                 size_t reserve_bytes;
1516                 size_t dirty_pages;
1517                 size_t copied;
1518                 size_t dirty_sectors;
1519                 size_t num_sectors;
1520
1521                 WARN_ON(num_pages > nrptrs);
1522
1523                 /*
1524                  * Fault pages before locking them in prepare_pages
1525                  * to avoid a recursive lock
1526                  */
1527                 if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
1528                         ret = -EFAULT;
1529                         break;
1530                 }
1531
1532                 sector_offset = pos & (root->sectorsize - 1);
1533                 reserve_bytes = round_up(write_bytes + sector_offset,
1534                                 root->sectorsize);
1535
1536                 if (BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
1537                                              BTRFS_INODE_PREALLOC)) {
1538                         ret = check_can_nocow(inode, pos, &write_bytes);
1539                         if (ret < 0)
1540                                 break;
1541                         if (ret > 0) {
1542                                 /*
1543                                  * For the nodatacow case, there is no
1544                                  * need to reserve data space.
1545                                  */
1546                                 only_release_metadata = true;
1547                                 /*
1548                                  * our prealloc extent may be smaller than
1549                                  * write_bytes, so scale down.
1550                                  */
1551                                 num_pages = DIV_ROUND_UP(write_bytes + offset,
1552                                                          PAGE_CACHE_SIZE);
1553                                 reserve_bytes = round_up(write_bytes
1554                                                         + sector_offset,
1555                                                         root->sectorsize);
1556                                 goto reserve_metadata;
1557                         }
1558                 }
1559                 ret = btrfs_check_data_free_space(inode, pos, write_bytes);
1560                 if (ret < 0)
1561                         break;
1562
1563 reserve_metadata:
1564                 ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes);
1565                 if (ret) {
1566                         if (!only_release_metadata)
1567                                 btrfs_free_reserved_data_space(inode, pos,
1568                                                                write_bytes);
1569                         else
1570                                 btrfs_end_write_no_snapshoting(root);
1571                         break;
1572                 }
1573
1574                 release_bytes = reserve_bytes;
1575                 need_unlock = false;
1576 again:
1577                 /*
1578                  * This is going to set up the pages array with the number of
1579                  * pages we want, so we don't really need to worry about the
1580                  * contents of pages from loop to loop
1581                  */
1582                 ret = prepare_pages(inode, pages, num_pages,
1583                                     pos, write_bytes,
1584                                     force_page_uptodate);
1585                 if (ret)
1586                         break;
1587
1588                 ret = lock_and_cleanup_extent_if_need(inode, pages, num_pages,
1589                                                 pos, write_bytes, &lockstart,
1590                                                 &lockend, &cached_state);
1591                 if (ret < 0) {
1592                         if (ret == -EAGAIN)
1593                                 goto again;
1594                         break;
1595                 } else if (ret > 0) {
1596                         need_unlock = true;
1597                         ret = 0;
1598                 }
1599
1600                 copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
1601
1602                 /*
1603                  * if we have trouble faulting in the pages, fall
1604                  * back to one page at a time
1605                  */
1606                 if (copied < write_bytes)
1607                         nrptrs = 1;
1608
1609                 if (copied == 0) {
1610                         force_page_uptodate = true;
1611                         dirty_pages = 0;
1612                 } else {
1613                         force_page_uptodate = false;
1614                         dirty_pages = DIV_ROUND_UP(copied + offset,
1615                                                    PAGE_CACHE_SIZE);
1616                 }
1617
1618                 /*
1619                  * If we had a short copy we need to release the excess delalloc
1620                  * bytes we reserved.  We need to increment outstanding_extents
1621                  * because btrfs_delalloc_release_space will decrement it, but
1622                  * we still have an outstanding extent for the chunk we actually
1623                  * managed to copy.
1624                  */
1625                 num_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
1626                                                 reserve_bytes);
1627                 dirty_sectors = round_up(copied + sector_offset,
1628                                         root->sectorsize);
1629                 dirty_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
1630                                                 dirty_sectors);
1631
1632                 if (num_sectors > dirty_sectors) {
1633                         release_bytes = (write_bytes - copied)
1634                                 & ~((u64)root->sectorsize - 1);
1635                         if (copied > 0) {
1636                                 spin_lock(&BTRFS_I(inode)->lock);
1637                                 BTRFS_I(inode)->outstanding_extents++;
1638                                 spin_unlock(&BTRFS_I(inode)->lock);
1639                         }
1640                         if (only_release_metadata) {
1641                                 btrfs_delalloc_release_metadata(inode,
1642                                                                 release_bytes);
1643                         } else {
1644                                 u64 __pos;
1645
1646                                 __pos = round_down(pos, root->sectorsize) +
1647                                         (dirty_pages << PAGE_CACHE_SHIFT);
1648                                 btrfs_delalloc_release_space(inode, __pos,
1649                                                              release_bytes);
1650                         }
1651                 }
1652
1653                 release_bytes = round_up(copied + sector_offset,
1654                                         root->sectorsize);
1655
1656                 if (copied > 0)
1657                         ret = btrfs_dirty_pages(root, inode, pages,
1658                                                 dirty_pages, pos, copied,
1659                                                 NULL);
1660                 if (need_unlock)
1661                         unlock_extent_cached(&BTRFS_I(inode)->io_tree,
1662                                              lockstart, lockend, &cached_state,
1663                                              GFP_NOFS);
1664                 if (ret) {
1665                         btrfs_drop_pages(pages, num_pages);
1666                         break;
1667                 }
1668
1669                 release_bytes = 0;
1670                 if (only_release_metadata)
1671                         btrfs_end_write_no_snapshoting(root);
1672
1673                 if (only_release_metadata && copied > 0) {
1674                         lockstart = round_down(pos, root->sectorsize);
1675                         lockend = round_up(pos + copied, root->sectorsize) - 1;
1676
1677                         set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
1678                                        lockend, EXTENT_NORESERVE, NULL,
1679                                        NULL, GFP_NOFS);
1680                         only_release_metadata = false;
1681                 }
1682
1683                 btrfs_drop_pages(pages, num_pages);
1684
1685                 cond_resched();
1686
1687                 balance_dirty_pages_ratelimited(inode->i_mapping);
1688                 if (dirty_pages < (root->nodesize >> PAGE_CACHE_SHIFT) + 1)
1689                         btrfs_btree_balance_dirty(root);
1690
1691                 pos += copied;
1692                 num_written += copied;
1693         }
1694
1695         kfree(pages);
1696
1697         if (release_bytes) {
1698                 if (only_release_metadata) {
1699                         btrfs_end_write_no_snapshoting(root);
1700                         btrfs_delalloc_release_metadata(inode, release_bytes);
1701                 } else {
1702                         btrfs_delalloc_release_space(inode, pos, release_bytes);
1703                 }
1704         }
1705
1706         return num_written ? num_written : ret;
1707 }
1708
1709 static ssize_t __btrfs_direct_write(struct kiocb *iocb,
1710                                     struct iov_iter *from,
1711                                     loff_t pos)
1712 {
1713         struct file *file = iocb->ki_filp;
1714         struct inode *inode = file_inode(file);
1715         ssize_t written;
1716         ssize_t written_buffered;
1717         loff_t endbyte;
1718         int err;
1719
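        /*
         * Attempt the direct write first; if it only completes partially we
         * fall back to a buffered write for whatever remains in the iov.
         */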
1720         written = generic_file_direct_write(iocb, from, pos);
1721
1722         if (written < 0 || !iov_iter_count(from))
1723                 return written;
1724
1725         pos += written;
1726         written_buffered = __btrfs_buffered_write(file, from, pos);
1727         if (written_buffered < 0) {
1728                 err = written_buffered;
1729                 goto out;
1730         }
1731         /*
1732          * Ensure all data is persisted. We want the next direct IO read to be
1733          * able to read what was just written.
1734          */
1735         endbyte = pos + written_buffered - 1;
1736         err = btrfs_fdatawrite_range(inode, pos, endbyte);
1737         if (err)
1738                 goto out;
1739         err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
1740         if (err)
1741                 goto out;
1742         written += written_buffered;
1743         iocb->ki_pos = pos + written_buffered;
1744         invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
1745                                  endbyte >> PAGE_CACHE_SHIFT);
1746 out:
1747         return written ? written : err;
1748 }
1749
1750 static void update_time_for_write(struct inode *inode)
1751 {
1752         struct timespec now;
1753
1754         if (IS_NOCMTIME(inode))
1755                 return;
1756
1757         now = current_fs_time(inode->i_sb);
1758         if (!timespec_equal(&inode->i_mtime, &now))
1759                 inode->i_mtime = now;
1760
1761         if (!timespec_equal(&inode->i_ctime, &now))
1762                 inode->i_ctime = now;
1763
1764         if (IS_I_VERSION(inode))
1765                 inode_inc_iversion(inode);
1766 }
1767
1768 static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
1769                                     struct iov_iter *from)
1770 {
1771         struct file *file = iocb->ki_filp;
1772         struct inode *inode = file_inode(file);
1773         struct btrfs_root *root = BTRFS_I(inode)->root;
1774         u64 start_pos;
1775         u64 end_pos;
1776         ssize_t num_written = 0;
1777         bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);
1778         ssize_t err;
1779         loff_t pos;
1780         size_t count;
1781         loff_t oldsize;
1782         int clean_page = 0;
1783
1784         inode_lock(inode);
1785         err = generic_write_checks(iocb, from);
1786         if (err <= 0) {
1787                 inode_unlock(inode);
1788                 return err;
1789         }
1790
1791         current->backing_dev_info = inode_to_bdi(inode);
1792         err = file_remove_privs(file);
1793         if (err) {
1794                 inode_unlock(inode);
1795                 goto out;
1796         }
1797
1798         /*
1799          * If BTRFS flips readonly due to some impossible error
1800          * (fs_info->fs_state now has BTRFS_FS_STATE_ERROR),
1801          * although we have opened a file as writable, we have
1802          * to stop this write operation to ensure FS consistency.
1803          */
1804         if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
1805                 inode_unlock(inode);
1806                 err = -EROFS;
1807                 goto out;
1808         }
1809
1810         /*
1811          * We reserve space for updating the inode when we reserve space for the
1812          * extent we are going to write, so any ENOSPC will happen there.  We don't
1813          * need to start yet another transaction to update the inode as we will
1814          * update the inode when we finish writing whatever data we write.
1815          */
1816         update_time_for_write(inode);
1817
1818         pos = iocb->ki_pos;
1819         count = iov_iter_count(from);
1820         start_pos = round_down(pos, root->sectorsize);
1821         oldsize = i_size_read(inode);
1822         if (start_pos > oldsize) {
1823                 /* Expand hole size to cover write data, preventing empty gap */
1824                 end_pos = round_up(pos + count, root->sectorsize);
1825                 err = btrfs_cont_expand(inode, oldsize, end_pos);
1826                 if (err) {
1827                         inode_unlock(inode);
1828                         goto out;
1829                 }
1830                 if (start_pos > round_up(oldsize, root->sectorsize))
1831                         clean_page = 1;
1832         }
1833
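        /*
         * Count ourselves as a synchronous writer; the bio submission path
         * checks sync_writers to do the checksumming inline instead of
         * deferring it to worker threads, which keeps O_DSYNC/sync write
         * latency down.
         */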
1834         if (sync)
1835                 atomic_inc(&BTRFS_I(inode)->sync_writers);
1836
1837         if (iocb->ki_flags & IOCB_DIRECT) {
1838                 num_written = __btrfs_direct_write(iocb, from, pos);
1839         } else {
1840                 num_written = __btrfs_buffered_write(file, from, pos);
1841                 if (num_written > 0)
1842                         iocb->ki_pos = pos + num_written;
1843                 if (clean_page)
1844                         pagecache_isize_extended(inode, oldsize,
1845                                                 i_size_read(inode));
1846         }
1847
1848         inode_unlock(inode);
1849
1850         /*
1851          * We also have to set last_sub_trans to the current log transid,
1852          * otherwise subsequent syncs to a file that's been synced in this
1853          * transaction will appear to have already occurred.
1854          */
1855         spin_lock(&BTRFS_I(inode)->lock);
1856         BTRFS_I(inode)->last_sub_trans = root->log_transid;
1857         spin_unlock(&BTRFS_I(inode)->lock);
1858         if (num_written > 0) {
1859                 err = generic_write_sync(file, pos, num_written);
1860                 if (err < 0)
1861                         num_written = err;
1862         }
1863
1864         if (sync)
1865                 atomic_dec(&BTRFS_I(inode)->sync_writers);
1866 out:
1867         current->backing_dev_info = NULL;
1868         return num_written ? num_written : err;
1869 }
1870
1871 int btrfs_release_file(struct inode *inode, struct file *filp)
1872 {
1873         if (filp->private_data)
1874                 btrfs_ioctl_trans_end(filp);
1875         /*
1876          * ordered_data_close is set by setattr when we are about to truncate
1877          * a file from a non-zero size to a zero size.  This tries to
1878          * flush down new bytes that may have been written if the
1879          * application were using truncate to replace a file in place.
1880          */
1881         if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
1882                                &BTRFS_I(inode)->runtime_flags))
1883                         filemap_flush(inode->i_mapping);
1884         return 0;
1885 }
1886
1887 static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
1888 {
1889         int ret;
1890
1891         atomic_inc(&BTRFS_I(inode)->sync_writers);
1892         ret = btrfs_fdatawrite_range(inode, start, end);
1893         atomic_dec(&BTRFS_I(inode)->sync_writers);
1894
1895         return ret;
1896 }
1897
1898 /*
1899  * fsync call for both files and directories.  This logs the inode into
1900  * the tree log instead of forcing full commits whenever possible.
1901  *
1902  * It needs to call filemap_fdatawait so that all ordered extent updates
1903  * in the metadata btree are up to date for copying to the log.
1904  *
1905  * It drops the inode mutex before doing the tree log commit.  This is an
1906  * important optimization for directories because holding the mutex prevents
1907  * new operations on the dir while we write to disk.
1908  */
1909 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1910 {
1911         struct dentry *dentry = file->f_path.dentry;
1912         struct inode *inode = d_inode(dentry);
1913         struct btrfs_root *root = BTRFS_I(inode)->root;
1914         struct btrfs_trans_handle *trans;
1915         struct btrfs_log_ctx ctx;
1916         int ret = 0;
1917         bool full_sync = false;
1918         u64 len;
1919
1920         /*
1921          * The range length can be represented by u64, but we have to do the
1922          * typecasts to avoid signed overflow if it's [0, LLONG_MAX], e.g. from fsync()
1923          */
1924         len = (u64)end - (u64)start + 1;
1925         trace_btrfs_sync_file(file, datasync);
1926
1927         /*
1928          * We write the dirty pages in the range and wait until they complete
1929          * outside of the ->i_mutex, so the dirty pages can be flushed by
1930          * multiple tasks, which improves performance.  See
1931          * btrfs_wait_ordered_range for an explanation of the ASYNC check.
1932          */
1933         ret = start_ordered_ops(inode, start, end);
1934         if (ret)
1935                 return ret;
1936
1937         inode_lock(inode);
1938         atomic_inc(&root->log_batch);
1939         full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1940                              &BTRFS_I(inode)->runtime_flags);
1941         /*
1942          * We might have had more pages made dirty after calling
1943          * start_ordered_ops and before acquiring the inode's i_mutex.
1944          */
1945         if (full_sync) {
1946                 /*
1947                  * For a full sync, we need to make sure any ordered operations
1948                  * start and finish before we start logging the inode, so that
1949                  * all extents are persisted and the respective file extent
1950                  * items are in the fs/subvol btree.
1951                  */
1952                 ret = btrfs_wait_ordered_range(inode, start, len);
1953         } else {
1954                 /*
1955                  * Start any new ordered operations before starting to log the
1956                  * inode. We will wait for them to finish in btrfs_sync_log().
1957                  *
1958                  * Right before acquiring the inode's mutex, we might have new
1959                  * writes dirtying pages, which won't immediately start the
1960                  * respective ordered operations - that is done through the
1961                  * fill_delalloc callbacks invoked from the writepage and
1962                  * writepages address space operations. So make sure we start
1963                  * all ordered operations before starting to log our inode. Not
1964                  * doing this means that while logging the inode, writeback
1965                  * could start and invoke writepage/writepages, which would call
1966                  * the fill_delalloc callbacks (cow_file_range,
1967                  * submit_compressed_extents). These callbacks add first an
1968                  * extent map to the modified list of extents and then create
1969                  * the respective ordered operation, which means in
1970                  * tree-log.c:btrfs_log_inode() we might capture all existing
1971                  * ordered operations (with btrfs_get_logged_extents()) before
1972                  * the fill_delalloc callback adds its ordered operation, and by
1973                  * the time we visit the modified list of extent maps (with
1974                  * btrfs_log_changed_extents()), we see and process the extent
1975                  * map they created. We then use the extent map to construct a
1976                  * file extent item for logging without waiting for the
1977                  * respective ordered operation to finish - this file extent
1978                  * item points to a disk location that might not have yet been
1979                  * written to, containing random data - so after a crash a log
1980                  * replay will make our inode have file extent items that point
1981                  * to disk locations containing invalid data, as we returned
1982                  * success to userspace without waiting for the respective
1983                  * ordered operation to finish, because it wasn't captured by
1984                  * btrfs_get_logged_extents().
1985                  */
1986                 ret = start_ordered_ops(inode, start, end);
1987         }
1988         if (ret) {
1989                 inode_unlock(inode);
1990                 goto out;
1991         }
1992         atomic_inc(&root->log_batch);
1993
1994         /*
1995          * If the last transaction that changed this file was before the current
1996          * transaction and we have the full sync flag set in our inode, we can
1997          * bail out now without any syncing.
1998          *
1999          * Note that we can't bail out if the full sync flag isn't set. This is
2000          * because when the full sync flag is set we start all ordered extents
2001          * and wait for them to fully complete - when they complete they update
2002          * the inode's last_trans field through:
2003          *
2004          *     btrfs_finish_ordered_io() ->
2005          *         btrfs_update_inode_fallback() ->
2006          *             btrfs_update_inode() ->
2007          *                 btrfs_set_inode_last_trans()
2008          *
2009          * So we are sure that last_trans is up to date and can do this check to
2010          * bail out safely. For the fast path, when the full sync flag is not
2011          * set in our inode, we cannot do it because we only start our ordered
2012          * extents and don't wait for them to complete (that is when
2013          * btrfs_finish_ordered_io runs), so here at this point their last_trans
2014          * value might be less than or equal to fs_info->last_trans_committed,
2015          * and setting a speculative last_trans for an inode when a buffered
2016          * write is made (such as fs_info->generation + 1 for example) would not
2017          * be reliable since after setting the value and before fsync is called
2018          * any number of transactions can start and commit (transaction kthread
2019          * commits the current transaction periodically), and a transaction
2020          * commit does not start nor waits for ordered extents to complete.
2021          */
2022         smp_mb();
2023         if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
2024             (BTRFS_I(inode)->last_trans <=
2025              root->fs_info->last_trans_committed &&
2026              (full_sync ||
2027               !btrfs_have_ordered_extents_in_range(inode, start, len)))) {
2028                 /*
2029          * We've had everything committed since the last time we were
2030                  * modified so clear this flag in case it was set for whatever
2031                  * reason, it's no longer relevant.
2032                  */
2033                 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2034                           &BTRFS_I(inode)->runtime_flags);
2035                 inode_unlock(inode);
2036                 goto out;
2037         }
2038
2039         /*
2040          * OK, we haven't committed the transaction yet; let's do a commit.
2041          */
2042         if (file->private_data)
2043                 btrfs_ioctl_trans_end(file);
2044
2045         /*
2046          * We use start here because we will need to wait on the IO to complete
2047          * in btrfs_sync_log, which could require joining a transaction (for
2048          * example checking cross references in the nocow path).  If we use join
2049          * here we could get into a situation where we're waiting on IO to
2050          * happen that is blocked on a transaction trying to commit.  With start
2051          * we inc the extwriter counter, so we wait for all extwriters to exit
2052          * before we start blocking join'ers.  This comment is to keep somebody
2053          * from thinking they are super smart and changing this to
2054          * btrfs_join_transaction *cough*Josef*cough*.
2055          */
2056         trans = btrfs_start_transaction(root, 0);
2057         if (IS_ERR(trans)) {
2058                 ret = PTR_ERR(trans);
2059                 inode_unlock(inode);
2060                 goto out;
2061         }
2062         trans->sync = true;
2063
2064         btrfs_init_log_ctx(&ctx);
2065
2066         ret = btrfs_log_dentry_safe(trans, root, dentry, start, end, &ctx);
2067         if (ret < 0) {
2068                 /* Fallthrough and commit/free transaction. */
2069                 ret = 1;
2070         }
2071
2072         /* we've logged all the items and now have a consistent
2073          * version of the file in the log.  It is possible that
2074          * someone will come in and modify the file, but that's
2075          * fine because the log is consistent on disk, and we
2076          * have references to all of the file's extents
2077          *
2078          * It is possible that someone will come in and log the
2079          * file again, but that will end up using the synchronization
2080          * inside btrfs_sync_log to keep things safe.
2081          */
2082         inode_unlock(inode);
2083
2084         /*
2085          * If any of the ordered extents had an error, just return it to user
2086          * space, so that the application knows some writes didn't succeed and
2087          * can take proper action (e.g. retry). Blindly committing the
2088          * transaction in this case, would fool userspace that everything was
2089          * successful. And we also want to make sure our log doesn't contain
2090          * file extent items pointing to extents that weren't fully written to -
2091          * just like in the non fast fsync path, where we check for the ordered
2092          * operation's error flag before writing to the log tree and return -EIO
2093          * if any of them had this flag set (btrfs_wait_ordered_range) -
2094          * therefore we need to check for errors in the ordered operations,
2095          * which are indicated by ctx.io_err.
2096          */
2097         if (ctx.io_err) {
2098                 btrfs_end_transaction(trans, root);
2099                 ret = ctx.io_err;
2100                 goto out;
2101         }
2102
2103         if (ret != BTRFS_NO_LOG_SYNC) {
2104                 if (!ret) {
2105                         ret = btrfs_sync_log(trans, root, &ctx);
2106                         if (!ret) {
2107                                 ret = btrfs_end_transaction(trans, root);
2108                                 goto out;
2109                         }
2110                 }
2111                 if (!full_sync) {
2112                         ret = btrfs_wait_ordered_range(inode, start, len);
2113                         if (ret) {
2114                                 btrfs_end_transaction(trans, root);
2115                                 goto out;
2116                         }
2117                 }
2118                 ret = btrfs_commit_transaction(trans, root);
2119         } else {
2120                 ret = btrfs_end_transaction(trans, root);
2121         }
2122 out:
2123         return ret > 0 ? -EIO : ret;
2124 }
2125
2126 static const struct vm_operations_struct btrfs_file_vm_ops = {
2127         .fault          = filemap_fault,
2128         .map_pages      = filemap_map_pages,
2129         .page_mkwrite   = btrfs_page_mkwrite,
2130 };
2131
2132 static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
2133 {
2134         struct address_space *mapping = filp->f_mapping;
2135
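        /* mmap requires a working ->readpage to fault pages in */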
2136         if (!mapping->a_ops->readpage)
2137                 return -ENOEXEC;
2138
2139         file_accessed(filp);
2140         vma->vm_ops = &btrfs_file_vm_ops;
2141
2142         return 0;
2143 }
2144
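/*
 * Check whether the file extent item at @slot is a hole (a regular extent
 * with disk_bytenr == 0) adjacent to the range [start, end), i.e. one that
 * a new hole covering that range could be merged into.
 */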
2145 static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
2146                           int slot, u64 start, u64 end)
2147 {
2148         struct btrfs_file_extent_item *fi;
2149         struct btrfs_key key;
2150
2151         if (slot < 0 || slot >= btrfs_header_nritems(leaf))
2152                 return 0;
2153
2154         btrfs_item_key_to_cpu(leaf, &key, slot);
2155         if (key.objectid != btrfs_ino(inode) ||
2156             key.type != BTRFS_EXTENT_DATA_KEY)
2157                 return 0;
2158
2159         fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2160
2161         if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2162                 return 0;
2163
2164         if (btrfs_file_extent_disk_bytenr(leaf, fi))
2165                 return 0;
2166
2167         if (key.offset == end)
2168                 return 1;
2169         if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
2170                 return 1;
2171         return 0;
2172 }
2173
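/*
 * Make sure the range [offset, end) is represented as a hole: extend an
 * adjacent hole extent item when one exists, otherwise insert a new one,
 * and refresh the cached extent maps.  With the NO_HOLES feature no file
 * extent item is needed and only the extent maps are updated.
 */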
2174 static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
2175                       struct btrfs_path *path, u64 offset, u64 end)
2176 {
2177         struct btrfs_root *root = BTRFS_I(inode)->root;
2178         struct extent_buffer *leaf;
2179         struct btrfs_file_extent_item *fi;
2180         struct extent_map *hole_em;
2181         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2182         struct btrfs_key key;
2183         int ret;
2184
2185         if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
2186                 goto out;
2187
2188         key.objectid = btrfs_ino(inode);
2189         key.type = BTRFS_EXTENT_DATA_KEY;
2190         key.offset = offset;
2191
2192         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2193         if (ret < 0)
2194                 return ret;
2195         BUG_ON(!ret);
2196
2197         leaf = path->nodes[0];
2198         if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
2199                 u64 num_bytes;
2200
2201                 path->slots[0]--;
2202                 fi = btrfs_item_ptr(leaf, path->slots[0],
2203                                     struct btrfs_file_extent_item);
2204                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
2205                         end - offset;
2206                 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2207                 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2208                 btrfs_set_file_extent_offset(leaf, fi, 0);
2209                 btrfs_mark_buffer_dirty(leaf);
2210                 goto out;
2211         }
2212
2213         if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
2214                 u64 num_bytes;
2215
2216                 key.offset = offset;
2217                 btrfs_set_item_key_safe(root->fs_info, path, &key);
2218                 fi = btrfs_item_ptr(leaf, path->slots[0],
2219                                     struct btrfs_file_extent_item);
2220                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
2221                         offset;
2222                 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2223                 btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2224                 btrfs_set_file_extent_offset(leaf, fi, 0);
2225                 btrfs_mark_buffer_dirty(leaf);
2226                 goto out;
2227         }
2228         btrfs_release_path(path);
2229
2230         ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
2231                                        0, 0, end - offset, 0, end - offset,
2232                                        0, 0, 0);
2233         if (ret)
2234                 return ret;
2235
2236 out:
2237         btrfs_release_path(path);
2238
2239         hole_em = alloc_extent_map();
2240         if (!hole_em) {
2241                 btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2242                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2243                         &BTRFS_I(inode)->runtime_flags);
2244         } else {
2245                 hole_em->start = offset;
2246                 hole_em->len = end - offset;
2247                 hole_em->ram_bytes = hole_em->len;
2248                 hole_em->orig_start = offset;
2249
2250                 hole_em->block_start = EXTENT_MAP_HOLE;
2251                 hole_em->block_len = 0;
2252                 hole_em->orig_block_len = 0;
2253                 hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
2254                 hole_em->compress_type = BTRFS_COMPRESS_NONE;
2255                 hole_em->generation = trans->transid;
2256
2257                 do {
2258                         btrfs_drop_extent_cache(inode, offset, end - 1, 0);
2259                         write_lock(&em_tree->lock);
2260                         ret = add_extent_mapping(em_tree, hole_em, 1);
2261                         write_unlock(&em_tree->lock);
2262                 } while (ret == -EEXIST);
2263                 free_extent_map(hole_em);
2264                 if (ret)
2265                         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2266                                 &BTRFS_I(inode)->runtime_flags);
2267         }
2268
2269         return 0;
2270 }
2271
2272 /*
2273  * Find a hole extent on the given inode and change start/len to the end of
2274  * the hole extent (a hole/vacuum extent is one whose em->start <= start &&
2275  * em->start + em->len > start).
2276  * When a hole extent is found, return 1 and modify start/len.
2277  */
2278 static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
2279 {
2280         struct extent_map *em;
2281         int ret = 0;
2282
2283         em = btrfs_get_extent(inode, NULL, 0, *start, *len, 0);
2284         if (IS_ERR_OR_NULL(em)) {
2285                 if (!em)
2286                         ret = -ENOMEM;
2287                 else
2288                         ret = PTR_ERR(em);
2289                 return ret;
2290         }
2291
2292         /* Hole or vacuum extent (the latter only exists in no-holes mode) */
2293         if (em->block_start == EXTENT_MAP_HOLE) {
2294                 ret = 1;
2295                 *len = em->start + em->len > *start + *len ?
2296                        0 : *start + *len - em->start - em->len;
2297                 *start = em->start + em->len;
2298         }
2299         free_extent_map(em);
2300         return ret;
2301 }
2302
2303 static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
2304 {
2305         struct btrfs_root *root = BTRFS_I(inode)->root;
2306         struct extent_state *cached_state = NULL;
2307         struct btrfs_path *path;
2308         struct btrfs_block_rsv *rsv;
2309         struct btrfs_trans_handle *trans;
2310         u64 lockstart;
2311         u64 lockend;
2312         u64 tail_start;
2313         u64 tail_len;
2314         u64 orig_start = offset;
2315         u64 cur_offset;
2316         u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
2317         u64 drop_end;
2318         int ret = 0;
2319         int err = 0;
2320         unsigned int rsv_count;
2321         bool same_block;
2322         bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES);
2323         u64 ino_size;
2324         bool truncated_block = false;
2325         bool updated_inode = false;
2326
2327         ret = btrfs_wait_ordered_range(inode, offset, len);
2328         if (ret)
2329                 return ret;
2330
2331         inode_lock(inode);
2332         ino_size = round_up(inode->i_size, root->sectorsize);
2333         ret = find_first_non_hole(inode, &offset, &len);
2334         if (ret < 0)
2335                 goto out_only_mutex;
2336         if (ret && !len) {
2337                 /* Already in a large hole */
2338                 ret = 0;
2339                 goto out_only_mutex;
2340         }
2341
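        /*
         * lockstart..lockend covers only the block aligned middle of the
         * range; unaligned head and tail bytes are zeroed within their
         * blocks (btrfs_truncate_block) instead of dropping extents.
         */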
2342         lockstart = round_up(offset, BTRFS_I(inode)->root->sectorsize);
2343         lockend = round_down(offset + len,
2344                              BTRFS_I(inode)->root->sectorsize) - 1;
2345         same_block = (BTRFS_BYTES_TO_BLKS(root->fs_info, offset))
2346                 == (BTRFS_BYTES_TO_BLKS(root->fs_info, offset + len - 1));
2347         /*
2348          * We needn't truncate any block which is beyond the end of the file
2349          * because we are sure there is no data there.  Only do this if we
2350          * are in the same block and we aren't doing the entire block.
2351          */
2355         if (same_block && len < root->sectorsize) {
2356                 if (offset < ino_size) {
2357                         truncated_block = true;
2358                         ret = btrfs_truncate_block(inode, offset, len, 0);
2359                 } else {
2360                         ret = 0;
2361                 }
2362                 goto out_only_mutex;
2363         }
2364
2365         /* zero back part of the first block */
2366         if (offset < ino_size) {
2367                 truncated_block = true;
2368                 ret = btrfs_truncate_block(inode, offset, 0, 0);
2369                 if (ret) {
2370                         inode_unlock(inode);
2371                         return ret;
2372                 }
2373         }
2374
2375         /* Check the aligned pages after the first unaligned page; if
2376          * offset != orig_start, the first unaligned page and several
2377          * following pages are already in holes, so the extra check
2378          * can be skipped. */
2379         if (offset == orig_start) {
2380                 /* after truncate page, check hole again */
2381                 len = offset + len - lockstart;
2382                 offset = lockstart;
2383                 ret = find_first_non_hole(inode, &offset, &len);
2384                 if (ret < 0)
2385                         goto out_only_mutex;
2386                 if (ret && !len) {
2387                         ret = 0;
2388                         goto out_only_mutex;
2389                 }
2390                 lockstart = offset;
2391         }
2392
2393         /* Check the tail unaligned part is in a hole */
2394         tail_start = lockend + 1;
2395         tail_len = offset + len - tail_start;
2396         if (tail_len) {
2397                 ret = find_first_non_hole(inode, &tail_start, &tail_len);
2398                 if (unlikely(ret < 0))
2399                         goto out_only_mutex;
2400                 if (!ret) {
2401                         /* zero the front end of the last page */
2402                         if (tail_start + tail_len < ino_size) {
2403                                 truncated_block = true;
2404                                 ret = btrfs_truncate_block(inode,
2405                                                         tail_start + tail_len,
2406                                                         0, 1);
2407                                 if (ret)
2408                                         goto out_only_mutex;
2409                         }
2410                 }
2411         }
2412
2413         if (lockend < lockstart) {
2414                 ret = 0;
2415                 goto out_only_mutex;
2416         }
2417
2418         while (1) {
2419                 struct btrfs_ordered_extent *ordered;
2420
2421                 truncate_pagecache_range(inode, lockstart, lockend);
2422
2423                 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2424                                  &cached_state);
2425                 ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
2426
2427                 /*
2428                  * We need to make sure we have no ordered extents in this range
2429                  * and nobody raced in and read a page in this range, if we did
2430                  * we need to try again.
2431                  */
2432                 if ((!ordered ||
2433                     (ordered->file_offset + ordered->len <= lockstart ||
2434                      ordered->file_offset > lockend)) &&
2435                      !btrfs_page_exists_in_range(inode, lockstart, lockend)) {
2436                         if (ordered)
2437                                 btrfs_put_ordered_extent(ordered);
2438                         break;
2439                 }
2440                 if (ordered)
2441                         btrfs_put_ordered_extent(ordered);
2442                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
2443                                      lockend, &cached_state, GFP_NOFS);
2444                 ret = btrfs_wait_ordered_range(inode, lockstart,
2445                                                lockend - lockstart + 1);
2446                 if (ret) {
2447                         inode_unlock(inode);
2448                         return ret;
2449                 }
2450         }
2451
2452         path = btrfs_alloc_path();
2453         if (!path) {
2454                 ret = -ENOMEM;
2455                 goto out;
2456         }
2457
2458         rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
2459         if (!rsv) {
2460                 ret = -ENOMEM;
2461                 goto out_free;
2462         }
2463         rsv->size = btrfs_calc_trunc_metadata_size(root, 1);
2464         rsv->failfast = 1;
2465
2466         /*
2467          * 1 - update the inode
2468          * 1 - removing the extents in the range
2469          * 1 - adding the hole extent if no_holes isn't set
2470          */
2471         rsv_count = no_holes ? 2 : 3;
2472         trans = btrfs_start_transaction(root, rsv_count);
2473         if (IS_ERR(trans)) {
2474                 err = PTR_ERR(trans);
2475                 goto out_free;
2476         }
2477
2478         ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
2479                                       min_size);
2480         BUG_ON(ret);
2481         trans->block_rsv = rsv;
2482
2483         cur_offset = lockstart;
2484         len = lockend - cur_offset;
2485         while (cur_offset < lockend) {
2486                 ret = __btrfs_drop_extents(trans, root, inode, path,
2487                                            cur_offset, lockend + 1,
2488                                            &drop_end, 1, 0, 0, NULL);
2489                 if (ret != -ENOSPC)
2490                         break;
2491
2492                 trans->block_rsv = &root->fs_info->trans_block_rsv;
2493
2494                 if (cur_offset < ino_size) {
2495                         ret = fill_holes(trans, inode, path, cur_offset,
2496                                          drop_end);
2497                         if (ret) {
2498                                 err = ret;
2499                                 break;
2500                         }
2501                 }
2502
2503                 cur_offset = drop_end;
2504
2505                 ret = btrfs_update_inode(trans, root, inode);
2506                 if (ret) {
2507                         err = ret;
2508                         break;
2509                 }
2510
2511                 btrfs_end_transaction(trans, root);
2512                 btrfs_btree_balance_dirty(root);
2513
2514                 trans = btrfs_start_transaction(root, rsv_count);
2515                 if (IS_ERR(trans)) {
2516                         ret = PTR_ERR(trans);
2517                         trans = NULL;
2518                         break;
2519                 }
2520
2521                 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
2522                                               rsv, min_size);
2523                 BUG_ON(ret);    /* shouldn't happen */
2524                 trans->block_rsv = rsv;
2525
2526                 ret = find_first_non_hole(inode, &cur_offset, &len);
2527                 if (unlikely(ret < 0))
2528                         break;
2529                 if (ret && !len) {
2530                         ret = 0;
2531                         break;
2532                 }
2533         }
2534
2535         if (ret) {
2536                 err = ret;
2537                 goto out_trans;
2538         }
2539
2540         trans->block_rsv = &root->fs_info->trans_block_rsv;
2541         /*
2542          * If we are using the NO_HOLES feature we might have already had a
2543          * hole that overlaps a part of the region [lockstart, lockend] and
2544          * ends at (or beyond) lockend. Since we have no file extent items to
2545          * represent holes, drop_end can be less than lockend and so we must
2546          * make sure we have an extent map representing the existing hole (the
2547          * call to __btrfs_drop_extents() might have dropped the existing extent
2548          * map representing the existing hole), otherwise the fast fsync path
2549          * will not record the existence of the hole region
2550          * [existing_hole_start, lockend].
2551          */
2552         if (drop_end <= lockend)
2553                 drop_end = lockend + 1;
2554         /*
2555          * Don't insert file hole extent item if it's for a range beyond eof
2556          * (because it's useless) or if it represents a 0 bytes range (when
2557          * cur_offset == drop_end).
2558          */
2559         if (cur_offset < ino_size && cur_offset < drop_end) {
2560                 ret = fill_holes(trans, inode, path, cur_offset, drop_end);
2561                 if (ret) {
2562                         err = ret;
2563                         goto out_trans;
2564                 }
2565         }
2566
2567 out_trans:
2568         if (!trans)
2569                 goto out_free;
2570
2571         inode_inc_iversion(inode);
2572         inode->i_mtime = inode->i_ctime = CURRENT_TIME;
2573
2574         trans->block_rsv = &root->fs_info->trans_block_rsv;
2575         ret = btrfs_update_inode(trans, root, inode);
2576         updated_inode = true;
2577         btrfs_end_transaction(trans, root);
2578         btrfs_btree_balance_dirty(root);
2579 out_free:
2580         btrfs_free_path(path);
2581         btrfs_free_block_rsv(root, rsv);
2582 out:
2583         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2584                              &cached_state, GFP_NOFS);
2585 out_only_mutex:
2586         if (!updated_inode && truncated_block && !ret && !err) {
2587                 /*
2588                  * If we only end up zeroing part of a page, we still need to
2589                  * update the inode item, so that all the time fields are
2590                  * updated as well as the necessary btrfs inode in memory fields
2591                  * for detecting, at fsync time, if the inode isn't yet in the
2592                  * log tree or it's there but not up to date.
2593                  */
2594                 trans = btrfs_start_transaction(root, 1);
2595                 if (IS_ERR(trans)) {
2596                         err = PTR_ERR(trans);
2597                 } else {
2598                         err = btrfs_update_inode(trans, root, inode);
2599                         ret = btrfs_end_transaction(trans, root);
2600                 }
2601         }
2602         inode_unlock(inode);
2603         if (ret && !err)
2604                 err = ret;
2605         return err;
2606 }
2607
2608 /* Helper structure to record which range is already reserved */
2609 struct falloc_range {
2610         struct list_head list;
2611         u64 start;
2612         u64 len;
2613 };
2614
2615 /*
2616  * Helper function to add a falloc range
2617  *
2618  * The caller should already have locked a larger extent range containing
2619  * [start, start + len)
2620  */
2621 static int add_falloc_range(struct list_head *head, u64 start, u64 len)
2622 {
2623         struct falloc_range *prev = NULL;
2624         struct falloc_range *range = NULL;
2625
2626         if (list_empty(head))
2627                 goto insert;
2628
2629         /*
2630          * As fallocate iterates in increasing file offset order, we only
2631          * need to check the last range.
2632          */
2633         prev = list_entry(head->prev, struct falloc_range, list);
2634         if (prev->start + prev->len == start) {
2635                 prev->len += len;
2636                 return 0;
2637         }
2638 insert:
2639         range = kmalloc(sizeof(*range), GFP_NOFS);
2640         if (!range)
2641                 return -ENOMEM;
2642         range->start = start;
2643         range->len = len;
2644         list_add_tail(&range->list, head);
2645         return 0;
2646 }
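/*
 * Illustrative sketch (assumed, simplified userspace re-implementation;
 * not part of this file): the merge-or-append behaviour of
 * add_falloc_range() above, using an array in place of the list_head.
 * Because the caller walks the file in increasing offset order, only
 * the most recently added range can ever be contiguous with a new one.
 */
struct demo_range {
        unsigned long long start;
        unsigned long long len;
};

static void demo_add_range(struct demo_range *r, int *nr,
                           unsigned long long start, unsigned long long len)
{
        /* Contiguous with the last range recorded: just extend it. */
        if (*nr && r[*nr - 1].start + r[*nr - 1].len == start) {
                r[*nr - 1].len += len;
                return;
        }
        r[*nr].start = start;
        r[*nr].len = len;
        (*nr)++;
}
/*
 * demo_add_range(r, &n, 0, 4096) followed by
 * demo_add_range(r, &n, 4096, 4096) leaves a single range [0, 8192),
 * mirroring how adjacent ranges collapse into one reservation above.
 */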
2647
2648 static long btrfs_fallocate(struct file *file, int mode,
2649                             loff_t offset, loff_t len)
2650 {
2651         struct inode *inode = file_inode(file);
2652         struct extent_state *cached_state = NULL;
2653         struct falloc_range *range;
2654         struct falloc_range *tmp;
2655         struct list_head reserve_list;
2656         u64 cur_offset;
2657         u64 last_byte;
2658         u64 alloc_start;
2659         u64 alloc_end;
2660         u64 alloc_hint = 0;
2661         u64 locked_end;
2662         u64 actual_end = 0;
2663         struct extent_map *em;
2664         int blocksize = BTRFS_I(inode)->root->sectorsize;
2665         int ret;
2666
2667         alloc_start = round_down(offset, blocksize);
2668         alloc_end = round_up(offset + len, blocksize);
2669
2670         /* Make sure we aren't being given some crap mode */
2671         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2672                 return -EOPNOTSUPP;
2673
2674         if (mode & FALLOC_FL_PUNCH_HOLE)
2675                 return btrfs_punch_hole(inode, offset, len);
2676
2677         /*
2678          * Only trigger the disk allocation, don't trigger a qgroup reserve.
2679          *
2680          * Qgroup space will be checked later, per range.
2681          */
2682         ret = btrfs_alloc_data_chunk_ondemand(inode, alloc_end - alloc_start);
2683         if (ret < 0)
2684                 return ret;
2685
2686         inode_lock(inode);
2687         ret = inode_newsize_ok(inode, alloc_end);
2688         if (ret)
2689                 goto out;
2690
2691         /*
2692          * TODO: Move these two operations after we have checked for
2693          * accurate reserved space, or fallocate can still fail but
2694          * leave the page truncated or the size expanded.
2695          *
2696          * That's only a minor problem though, and does little harm.
2697          */
2698         if (alloc_start > inode->i_size) {
2699                 ret = btrfs_cont_expand(inode, i_size_read(inode),
2700                                         alloc_start);
2701                 if (ret)
2702                         goto out;
2703         } else if (offset + len > inode->i_size) {
2704                 /*
2705                  * If we are fallocating from the end of the file onward we
2706                  * need to zero out the end of the block if i_size lands in the
2707                  * middle of a block.
2708                  */
2709                 ret = btrfs_truncate_block(inode, inode->i_size, 0, 0);
2710                 if (ret)
2711                         goto out;
2712         }
2713
2714         /*
2715          * wait for ordered IO before we have any locks.  We'll loop again
2716          * below with the locks held.
2717          */
2718         ret = btrfs_wait_ordered_range(inode, alloc_start,
2719                                        alloc_end - alloc_start);
2720         if (ret)
2721                 goto out;
2722
2723         locked_end = alloc_end - 1;
2724         while (1) {
2725                 struct btrfs_ordered_extent *ordered;
2726
2727                 /* the extent lock is ordered inside the running
2728                  * transaction
2729                  */
2730                 lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
2731                                  locked_end, &cached_state);
2732                 ordered = btrfs_lookup_first_ordered_extent(inode,
2733                                                             alloc_end - 1);
2734                 if (ordered &&
2735                     ordered->file_offset + ordered->len > alloc_start &&
2736                     ordered->file_offset < alloc_end) {
2737                         btrfs_put_ordered_extent(ordered);
2738                         unlock_extent_cached(&BTRFS_I(inode)->io_tree,
2739                                              alloc_start, locked_end,
2740                                              &cached_state, GFP_NOFS);
2741                         /*
2742                          * we can't wait on the range with the transaction
2743                          * running or with the extent lock held
2744                          */
2745                         ret = btrfs_wait_ordered_range(inode, alloc_start,
2746                                                        alloc_end - alloc_start);
2747                         if (ret)
2748                                 goto out;
2749                 } else {
2750                         if (ordered)
2751                                 btrfs_put_ordered_extent(ordered);
2752                         break;
2753                 }
2754         }
2755
2756         /* First, check if we exceed the qgroup limit */
2757         INIT_LIST_HEAD(&reserve_list);
2758         cur_offset = alloc_start;
2759         while (1) {
2760                 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
2761                                       alloc_end - cur_offset, 0);
2762                 if (IS_ERR_OR_NULL(em)) {
2763                         if (!em)
2764                                 ret = -ENOMEM;
2765                         else
2766                                 ret = PTR_ERR(em);
2767                         break;
2768                 }
2769                 last_byte = min(extent_map_end(em), alloc_end);
2770                 actual_end = min_t(u64, extent_map_end(em), offset + len);
2771                 last_byte = ALIGN(last_byte, blocksize);
2772                 if (em->block_start == EXTENT_MAP_HOLE ||
2773                     (cur_offset >= inode->i_size &&
2774                      !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
2775                         ret = add_falloc_range(&reserve_list, cur_offset,
2776                                                last_byte - cur_offset);
2777                         if (ret < 0) {
2778                                 free_extent_map(em);
2779                                 break;
2780                         }
2781                         ret = btrfs_qgroup_reserve_data(inode, cur_offset,
2782                                         last_byte - cur_offset);
2783                         if (ret < 0)
2784                                 break;
2785                 }
2786                 free_extent_map(em);
2787                 cur_offset = last_byte;
2788                 if (cur_offset >= alloc_end)
2789                         break;
2790         }
2791
2792         /*
2793          * If ret is still 0, it means we're OK to fallocate.
2794          * Otherwise just clean up the list and exit.
2795          */
2796         list_for_each_entry_safe(range, tmp, &reserve_list, list) {
2797                 if (!ret)
2798                         ret = btrfs_prealloc_file_range(inode, mode,
2799                                         range->start,
2800                                         range->len, 1 << inode->i_blkbits,
2801                                         offset + len, &alloc_hint);
2802                 list_del(&range->list);
2803                 kfree(range);
2804         }
2805         if (ret < 0)
2806                 goto out_unlock;
2807
2808         if (actual_end > inode->i_size &&
2809             !(mode & FALLOC_FL_KEEP_SIZE)) {
2810                 struct btrfs_trans_handle *trans;
2811                 struct btrfs_root *root = BTRFS_I(inode)->root;
2812
2813                 /*
2814                  * We didn't need to allocate any more space, but we
2815                  * still extended the size of the file so we need to
2816                  * update i_size and the inode item.
2817                  */
2818                 trans = btrfs_start_transaction(root, 1);
2819                 if (IS_ERR(trans)) {
2820                         ret = PTR_ERR(trans);
2821                 } else {
2822                         inode->i_ctime = CURRENT_TIME;
2823                         i_size_write(inode, actual_end);
2824                         btrfs_ordered_update_i_size(inode, actual_end, NULL);
2825                         ret = btrfs_update_inode(trans, root, inode);
2826                         if (ret)
2827                                 btrfs_end_transaction(trans, root);
2828                         else
2829                                 ret = btrfs_end_transaction(trans, root);
2830                 }
2831         }
2832 out_unlock:
2833         unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
2834                              &cached_state, GFP_NOFS);
2835 out:
2836         /*
2837          * As we have waited on the extent range, the data_rsv_map must be
2838          * empty in the range: a written data range is released from it, and
2839          * a preallocated extent is also released once its metadata is
2840          * written.
2841          * So this call is purely cleanup.
2842          */
2843         btrfs_qgroup_free_data(inode, alloc_start, alloc_end - alloc_start);
2844         inode_unlock(inode);
2845         /* Let go of our reservation. */
2846         btrfs_free_reserved_data_space(inode, alloc_start,
2847                                        alloc_end - alloc_start);
2848         return ret;
2849 }
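/*
 * Usage sketch (hypothetical, userspace; not part of this file): the
 * two modes handled above. Without FALLOC_FL_KEEP_SIZE a successful
 * preallocation past EOF also grows i_size (the "actual_end" branch);
 * with it, blocks are reserved but the reported size stays put.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

static int prealloc_demo(const char *path)
{
        struct stat st;
        int fd = open(path, O_WRONLY | O_CREAT, 0644);

        if (fd < 0)
                return -1;
        /* Reserve 1 MiB of space without changing the file size. */
        if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) < 0)
                perror("fallocate");
        if (fstat(fd, &st) == 0)
                printf("size=%lld blocks=%lld\n",
                       (long long)st.st_size, (long long)st.st_blocks);
        close(fd);
        return 0;
}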
2850
2851 static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
2852 {
2853         struct btrfs_root *root = BTRFS_I(inode)->root;
2854         struct extent_map *em = NULL;
2855         struct extent_state *cached_state = NULL;
2856         u64 lockstart;
2857         u64 lockend;
2858         u64 start;
2859         u64 len;
2860         int ret = 0;
2861
2862         if (inode->i_size == 0)
2863                 return -ENXIO;
2864
2865         /*
2866          * *offset can be negative; in that case we start looking for DATA/HOLE
2867          * from the very start of the file.
2868          */
2869         start = max_t(loff_t, 0, *offset);
2870
2871         lockstart = round_down(start, root->sectorsize);
2872         lockend = round_up(i_size_read(inode), root->sectorsize);
2873         if (lockend <= lockstart)
2874                 lockend = lockstart + root->sectorsize;
2875         lockend--;
2876         len = lockend - lockstart + 1;
2877
2878         lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2879                          &cached_state);
2880
2881         while (start < inode->i_size) {
2882                 em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
2883                 if (IS_ERR(em)) {
2884                         ret = PTR_ERR(em);
2885                         em = NULL;
2886                         break;
2887                 }
2888
2889                 if (whence == SEEK_HOLE &&
2890                     (em->block_start == EXTENT_MAP_HOLE ||
2891                      test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
2892                         break;
2893                 else if (whence == SEEK_DATA &&
2894                            (em->block_start != EXTENT_MAP_HOLE &&
2895                             !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
2896                         break;
2897
2898                 start = em->start + em->len;
2899                 free_extent_map(em);
2900                 em = NULL;
2901                 cond_resched();
2902         }
2903         free_extent_map(em);
2904         if (!ret) {
2905                 if (whence == SEEK_DATA && start >= inode->i_size)
2906                         ret = -ENXIO;
2907                 else
2908                         *offset = min_t(loff_t, start, inode->i_size);
2909         }
2910         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2911                              &cached_state, GFP_NOFS);
2912         return ret;
2913 }
2914
2915 static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
2916 {
2917         struct inode *inode = file->f_mapping->host;
2918         int ret;
2919
2920         inode_lock(inode);
2921         switch (whence) {
2922         case SEEK_END:
2923         case SEEK_CUR:
2924                 offset = generic_file_llseek(file, offset, whence);
2925                 goto out;
2926         case SEEK_DATA:
2927         case SEEK_HOLE:
2928                 if (offset >= i_size_read(inode)) {
2929                         inode_unlock(inode);
2930                         return -ENXIO;
2931                 }
2932
2933                 ret = find_desired_extent(inode, &offset, whence);
2934                 if (ret) {
2935                         inode_unlock(inode);
2936                         return ret;
2937                 }
2938         }
2939
2940         offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
2941 out:
2942         inode_unlock(inode);
2943         return offset;
2944 }
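/*
 * Usage sketch (hypothetical, userspace; not part of this file):
 * walking a sparse file with the SEEK_DATA/SEEK_HOLE support
 * implemented above. Each data region starts where SEEK_DATA lands
 * and ends at the following SEEK_HOLE offset; lseek() fails with
 * ENXIO once no data remains past the requested offset.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>

static void map_data_regions(int fd)
{
        off_t data = 0;
        off_t hole;

        while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
                hole = lseek(fd, data, SEEK_HOLE);
                if (hole < 0)
                        break;
                printf("data: [%lld, %lld)\n",
                       (long long)data, (long long)hole);
                data = hole;
        }
}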
2945
2946 const struct file_operations btrfs_file_operations = {
2947         .llseek         = btrfs_file_llseek,
2948         .read_iter      = generic_file_read_iter,
2949         .splice_read    = generic_file_splice_read,
2950         .write_iter     = btrfs_file_write_iter,
2951         .mmap           = btrfs_file_mmap,
2952         .open           = generic_file_open,
2953         .release        = btrfs_release_file,
2954         .fsync          = btrfs_sync_file,
2955         .fallocate      = btrfs_fallocate,
2956         .unlocked_ioctl = btrfs_ioctl,
2957 #ifdef CONFIG_COMPAT
2958         .compat_ioctl   = btrfs_ioctl,
2959 #endif
2960         .copy_file_range = btrfs_copy_file_range,
2961         .clone_file_range = btrfs_clone_file_range,
2962         .dedupe_file_range = btrfs_dedupe_file_range,
2963 };
2964
2965 void btrfs_auto_defrag_exit(void)
2966 {
2967         /* kmem_cache_destroy() is a no-op for a NULL cache pointer */
2968         kmem_cache_destroy(btrfs_inode_defrag_cachep);
2969 }
2970
2971 int btrfs_auto_defrag_init(void)
2972 {
2973         btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
2974                                         sizeof(struct inode_defrag), 0,
2975                                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
2976                                         NULL);
2977         if (!btrfs_inode_defrag_cachep)
2978                 return -ENOMEM;
2979
2980         return 0;
2981 }
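/*
 * Illustrative sketch (assumed; not how this file spells it): the
 * typical alloc/free pairing for the cache created above. The defrag
 * code allocates a struct inode_defrag from this cache before taking
 * the defrag_inodes lock, and frees it back when an existing record
 * is already present in the rbtree.
 */
static struct inode_defrag *defrag_alloc_sketch(void)
{
        /* GFP_NOFS: never recurse into the filesystem from reclaim. */
        return kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
}

static void defrag_free_sketch(struct inode_defrag *defrag)
{
        kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}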
2982
2983 int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
2984 {
2985         int ret;
2986
2987         /*
2988          * So with compression we will find and lock a dirty page and clear
2989          * the first one as dirty, set up an async extent, and immediately
2990          * return with the entire range locked but with nobody actually marked
2991          * for writeback.  So we can't just call filemap_write_and_wait_range()
2992          * and expect it to work, since it will just kick off a thread to do
2993          * the actual work.  Instead we need to call filemap_fdatawrite_range()
2994          * _again_, since it will wait on the page lock, which won't be
2995          * unlocked until after the pages have been marked as writeback, and
2996          * so we're good to go from there.  We have to do this, otherwise
2997          * we'll miss the ordered extents and that results in badness.
2998          * Please Josef, do not think you know better and pull this out at
2999          * some point in the future, it is right and you are wrong.
3000          */
3001         ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3002         if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
3003                              &BTRFS_I(inode)->runtime_flags))
3004                 ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3005
3006         return ret;
3007 }
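/*
 * Usage sketch (assumed; not part of this file): the usual pairing for
 * the helper above. A caller that needs the range durable kicks off
 * writeback with btrfs_fdatawrite_range(), which copes with the async
 * compressed case internally, and then waits on the same byte range.
 */
static int fdatawrite_and_wait_sketch(struct inode *inode,
                                      loff_t start, loff_t end)
{
        int ret = btrfs_fdatawrite_range(inode, start, end);

        if (ret)
                return ret;
        /* Block until writeback of [start, end] has completed. */
        return filemap_fdatawait_range(inode->i_mapping, start, end);
}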