/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "volumes.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "backref.h"
#include "hash.h"
#include "props.h"
#include "qgroup.h"

struct btrfs_iget_args {
        struct btrfs_key *location;
        struct btrfs_root *root;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
static struct kmem_cache *btrfs_delalloc_work_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;

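/*
 * map the file-type bits of i_mode (shifted down by S_SHIFT) to the
 * BTRFS_FT_* type stored in directory items on disk
 */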
#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
        [S_IFREG >> S_SHIFT]    = BTRFS_FT_REG_FILE,
        [S_IFDIR >> S_SHIFT]    = BTRFS_FT_DIR,
        [S_IFCHR >> S_SHIFT]    = BTRFS_FT_CHRDEV,
        [S_IFBLK >> S_SHIFT]    = BTRFS_FT_BLKDEV,
        [S_IFIFO >> S_SHIFT]    = BTRFS_FT_FIFO,
        [S_IFSOCK >> S_SHIFT]   = BTRFS_FT_SOCK,
        [S_IFLNK >> S_SHIFT]    = BTRFS_FT_SYMLINK,
};

static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
static noinline int cow_file_range(struct inode *inode,
                                   struct page *locked_page,
                                   u64 start, u64 end, int *page_started,
                                   unsigned long *nr_written, int unlock);
static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
                                           u64 len, u64 orig_start,
                                           u64 block_start, u64 block_len,
                                           u64 orig_block_len, u64 ram_bytes,
                                           int type);

static int btrfs_dirty_inode(struct inode *inode);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
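/* test-only hook: point the inode's io_tree at the btrfs extent_io ops */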
void btrfs_test_inode_set_ops(struct inode *inode)
{
        BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
}
#endif

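/*
 * initialize security attributes for a new inode: inherit ACLs from the
 * parent directory and set up the security xattrs
 */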
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
                                     struct inode *inode,  struct inode *dir,
                                     const struct qstr *qstr)
{
        int err;

        err = btrfs_init_acl(trans, inode, dir);
        if (!err)
                err = btrfs_xattr_security_init(trans, inode, dir, qstr);
        return err;
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_path *path, int extent_inserted,
                                struct btrfs_root *root, struct inode *inode,
                                u64 start, size_t size, size_t compressed_size,
                                int compress_type,
                                struct page **compressed_pages)
{
        struct extent_buffer *leaf;
        struct page *page = NULL;
        char *kaddr;
        unsigned long ptr;
        struct btrfs_file_extent_item *ei;
        int err = 0;
        int ret;
        size_t cur_size = size;
        unsigned long offset;

        if (compressed_size && compressed_pages)
                cur_size = compressed_size;

        inode_add_bytes(inode, size);

        if (!extent_inserted) {
                struct btrfs_key key;
                size_t datasize;

                key.objectid = btrfs_ino(inode);
                key.offset = start;
                key.type = BTRFS_EXTENT_DATA_KEY;

                datasize = btrfs_file_extent_calc_inline_size(cur_size);
                path->leave_spinning = 1;
                ret = btrfs_insert_empty_item(trans, root, path, &key,
                                              datasize);
                if (ret) {
                        err = ret;
                        goto fail;
                }
        }
        leaf = path->nodes[0];
        ei = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        btrfs_set_file_extent_generation(leaf, ei, trans->transid);
        btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
        btrfs_set_file_extent_encryption(leaf, ei, 0);
        btrfs_set_file_extent_other_encoding(leaf, ei, 0);
        btrfs_set_file_extent_ram_bytes(leaf, ei, size);
        ptr = btrfs_file_extent_inline_start(ei);

        if (compress_type != BTRFS_COMPRESS_NONE) {
                struct page *cpage;
                int i = 0;
                while (compressed_size > 0) {
                        cpage = compressed_pages[i];
                        cur_size = min_t(unsigned long, compressed_size,
                                       PAGE_CACHE_SIZE);

                        kaddr = kmap_atomic(cpage);
                        write_extent_buffer(leaf, kaddr, ptr, cur_size);
                        kunmap_atomic(kaddr);

                        i++;
                        ptr += cur_size;
                        compressed_size -= cur_size;
                }
                btrfs_set_file_extent_compression(leaf, ei,
                                                  compress_type);
        } else {
                page = find_get_page(inode->i_mapping,
                                     start >> PAGE_CACHE_SHIFT);
                btrfs_set_file_extent_compression(leaf, ei, 0);
                kaddr = kmap_atomic(page);
                offset = start & (PAGE_CACHE_SIZE - 1);
                write_extent_buffer(leaf, kaddr + offset, ptr, size);
                kunmap_atomic(kaddr);
                page_cache_release(page);
        }
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);

        /*
         * we're an inline extent, so nobody can
         * extend the file past i_size without locking
         * a page we already have locked.
         *
         * We must do any isize and inode updates
         * before we unlock the pages.  Otherwise we
         * could end up racing with unlink.
         */
        BTRFS_I(inode)->disk_i_size = inode->i_size;
        ret = btrfs_update_inode(trans, root, inode);

        return ret;
fail:
        return err;
}


/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_root *root,
                                          struct inode *inode, u64 start,
                                          u64 end, size_t compressed_size,
                                          int compress_type,
                                          struct page **compressed_pages)
{
        struct btrfs_trans_handle *trans;
        u64 isize = i_size_read(inode);
        u64 actual_end = min(end + 1, isize);
        u64 inline_len = actual_end - start;
        u64 aligned_end = ALIGN(end, root->sectorsize);
        u64 data_len = inline_len;
        int ret;
        struct btrfs_path *path;
        int extent_inserted = 0;
        u32 extent_item_size;

        if (compressed_size)
                data_len = compressed_size;

        if (start > 0 ||
            actual_end > PAGE_CACHE_SIZE ||
            data_len > BTRFS_MAX_INLINE_DATA_SIZE(root) ||
            (!compressed_size &&
            (actual_end & (root->sectorsize - 1)) == 0) ||
            end + 1 < isize ||
            data_len > root->fs_info->max_inline) {
                return 1;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        trans = btrfs_join_transaction(root);
        if (IS_ERR(trans)) {
                btrfs_free_path(path);
                return PTR_ERR(trans);
        }
        trans->block_rsv = &root->fs_info->delalloc_block_rsv;

        if (compressed_size && compressed_pages)
                extent_item_size = btrfs_file_extent_calc_inline_size(
                   compressed_size);
        else
                extent_item_size = btrfs_file_extent_calc_inline_size(
                    inline_len);

        ret = __btrfs_drop_extents(trans, root, inode, path,
                                   start, aligned_end, NULL,
                                   1, 1, extent_item_size, &extent_inserted);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto out;
        }

        if (isize > actual_end)
                inline_len = min_t(u64, isize, actual_end);
        ret = insert_inline_extent(trans, path, extent_inserted,
                                   root, inode, start,
                                   inline_len, compressed_size,
                                   compress_type, compressed_pages);
        if (ret && ret != -ENOSPC) {
                btrfs_abort_transaction(trans, root, ret);
                goto out;
        } else if (ret == -ENOSPC) {
                ret = 1;
                goto out;
        }

        set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
        btrfs_delalloc_release_metadata(inode, end + 1 - start);
        btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
out:
        btrfs_free_path(path);
        btrfs_end_transaction(trans, root);
        return ret;
}

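/*
 * a single extent produced by phase one of compressed writeback, either
 * compressed pages or an uncompressed fallback (pages == NULL), queued
 * for the submission phase
 */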
struct async_extent {
        u64 start;
        u64 ram_size;
        u64 compressed_size;
        struct page **pages;
        unsigned long nr_pages;
        int compress_type;
        struct list_head list;
};

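/*
 * one chunk of a delalloc range, queued as a work item on the delalloc
 * workqueue; carries the async_extents produced by the compression phase
 */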
struct async_cow {
        struct inode *inode;
        struct btrfs_root *root;
        struct page *locked_page;
        u64 start;
        u64 end;
        struct list_head extents;
        struct btrfs_work work;
};

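/*
 * record a newly created extent on the async_cow's list so the ordered
 * submission phase can find it
 */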
static noinline int add_async_extent(struct async_cow *cow,
                                     u64 start, u64 ram_size,
                                     u64 compressed_size,
                                     struct page **pages,
                                     unsigned long nr_pages,
                                     int compress_type)
{
        struct async_extent *async_extent;

        async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
        BUG_ON(!async_extent); /* -ENOMEM */
        async_extent->start = start;
        async_extent->ram_size = ram_size;
        async_extent->compressed_size = compressed_size;
        async_extent->pages = pages;
        async_extent->nr_pages = nr_pages;
        async_extent->compress_type = compress_type;
        list_add_tail(&async_extent->list, &cow->extents);
        return 0;
}

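/*
 * decide whether a delalloc range should go through the compression path:
 * mount -o compress-force always wins, a per-inode NOCOMPRESS flag always
 * loses, and otherwise we compress when the mount option or the inode's
 * COMPRESS/force_compress flags ask for it
 */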
static inline int inode_need_compress(struct inode *inode)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;

        /* force compress */
        if (btrfs_test_opt(root, FORCE_COMPRESS))
                return 1;
        /* bad compression ratios */
        if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
                return 0;
        if (btrfs_test_opt(root, COMPRESS) ||
            BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS ||
            BTRFS_I(inode)->force_compress)
                return 1;
        return 0;
}

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that the flusher thread sent them
 * down.
 */
static noinline void compress_file_range(struct inode *inode,
                                        struct page *locked_page,
                                        u64 start, u64 end,
                                        struct async_cow *async_cow,
                                        int *num_added)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        u64 num_bytes;
        u64 blocksize = root->sectorsize;
        u64 actual_end;
        u64 isize = i_size_read(inode);
        int ret = 0;
        struct page **pages = NULL;
        unsigned long nr_pages;
        unsigned long nr_pages_ret = 0;
        unsigned long total_compressed = 0;
        unsigned long total_in = 0;
        unsigned long max_compressed = 128 * 1024;
        unsigned long max_uncompressed = 128 * 1024;
        int i;
        int will_compress;
        int compress_type = root->fs_info->compress_type;
        int redirty = 0;

        /* if this is a small write inside eof, kick off a defrag */
        if ((end - start + 1) < 16 * 1024 &&
            (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
                btrfs_add_inode_defrag(NULL, inode);

        actual_end = min_t(u64, isize, end + 1);
again:
        will_compress = 0;
        nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
        nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

        /*
         * we don't want to send crud past the end of i_size through
         * compression, that's just a waste of CPU time.  So, if the
         * end of the file is before the start of our current
         * requested range of bytes, we bail out to the uncompressed
         * cleanup code that can deal with all of this.
         *
         * It isn't really the fastest way to fix things, but this is a
         * very uncommon corner.
         */
        if (actual_end <= start)
                goto cleanup_and_bail_uncompressed;

        total_compressed = actual_end - start;

        /*
         * skip compression for a small file range (<= blocksize) that
         * isn't an inline extent, since it doesn't save disk space at all.
         */
        if (total_compressed <= blocksize &&
           (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
                goto cleanup_and_bail_uncompressed;

        /* we want to make sure that the amount of ram required to uncompress
         * an extent is reasonable, so we limit the total size in ram
         * of a compressed extent to 128k.  This is a crucial number
         * because it also controls how easily we can spread reads across
         * cpus for decompression.
         *
         * We also want to make sure the amount of IO required to do
         * a random read is reasonably small, so we limit the size of
         * a compressed extent to 128k.
         */
        total_compressed = min(total_compressed, max_uncompressed);
        num_bytes = ALIGN(end - start + 1, blocksize);
        num_bytes = max(blocksize,  num_bytes);
        total_in = 0;
        ret = 0;

        /*
         * we do compression for mount -o compress and when the
         * inode has not been flagged as nocompress.  This flag can
         * change at any time if we discover bad compression ratios.
         */
        if (inode_need_compress(inode)) {
                WARN_ON(pages);
                pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
                if (!pages) {
                        /* just bail out to the uncompressed code */
                        goto cont;
                }

                if (BTRFS_I(inode)->force_compress)
                        compress_type = BTRFS_I(inode)->force_compress;

                /*
                 * we need to call clear_page_dirty_for_io on each
                 * page in the range.  Otherwise applications with the file
                 * mmap'd can wander in and change the page contents while
                 * we are compressing them.
                 *
                 * If the compression fails for any reason, we set the pages
                 * dirty again later on.
                 */
                extent_range_clear_dirty_for_io(inode, start, end);
                redirty = 1;
                ret = btrfs_compress_pages(compress_type,
                                           inode->i_mapping, start,
                                           total_compressed, pages,
                                           nr_pages, &nr_pages_ret,
                                           &total_in,
                                           &total_compressed,
                                           max_compressed);

                if (!ret) {
                        unsigned long offset = total_compressed &
                                (PAGE_CACHE_SIZE - 1);
                        struct page *page = pages[nr_pages_ret - 1];
                        char *kaddr;

                        /* zero the tail end of the last page, we might be
                         * sending it down to disk
                         */
                        if (offset) {
                                kaddr = kmap_atomic(page);
                                memset(kaddr + offset, 0,
                                       PAGE_CACHE_SIZE - offset);
                                kunmap_atomic(kaddr);
                        }
                        will_compress = 1;
                }
        }
cont:
        if (start == 0) {
                /* let's try to make an inline extent */
                if (ret || total_in < (actual_end - start)) {
                        /* we didn't compress the entire range, try
                         * to make an uncompressed inline extent.
                         */
                        ret = cow_file_range_inline(root, inode, start, end,
                                                    0, 0, NULL);
                } else {
                        /* try making a compressed inline extent */
                        ret = cow_file_range_inline(root, inode, start, end,
                                                    total_compressed,
                                                    compress_type, pages);
                }
                if (ret <= 0) {
                        unsigned long clear_flags = EXTENT_DELALLOC |
                                EXTENT_DEFRAG;
                        unsigned long page_error_op;

                        clear_flags |= (ret < 0) ? EXTENT_DO_ACCOUNTING : 0;
                        page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;

                        /*
                         * inline extent creation worked or returned error,
                         * we don't need to create any more async work items.
                         * Unlock and free up our temp pages.
                         */
                        extent_clear_unlock_delalloc(inode, start, end, NULL,
                                                     clear_flags, PAGE_UNLOCK |
                                                     PAGE_CLEAR_DIRTY |
                                                     PAGE_SET_WRITEBACK |
                                                     page_error_op |
                                                     PAGE_END_WRITEBACK);
                        goto free_pages_out;
                }
        }

        if (will_compress) {
                /*
                 * we aren't doing an inline extent, so round the compressed
                 * size up to a block size boundary so the allocator does
                 * sane things
                 */
                total_compressed = ALIGN(total_compressed, blocksize);

                /*
                 * one last check to make sure the compression is really a
                 * win, compare the page count read with the blocks on disk
                 */
                total_in = ALIGN(total_in, PAGE_CACHE_SIZE);
                if (total_compressed >= total_in) {
                        will_compress = 0;
                } else {
                        num_bytes = total_in;
                }
        }
        if (!will_compress && pages) {
                /*
                 * the compression code ran but failed to make things smaller,
                 * free any pages it allocated and our page pointer array
                 */
                for (i = 0; i < nr_pages_ret; i++) {
                        WARN_ON(pages[i]->mapping);
                        page_cache_release(pages[i]);
                }
                kfree(pages);
                pages = NULL;
                total_compressed = 0;
                nr_pages_ret = 0;

                /* flag the file so we don't compress in the future */
                if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
                    !(BTRFS_I(inode)->force_compress)) {
                        BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
                }
        }
        if (will_compress) {
                *num_added += 1;

                /* the async work queues will take care of doing actual
                 * allocation on disk for these compressed pages,
                 * and will submit them to the elevator.
                 */
                add_async_extent(async_cow, start, num_bytes,
                                 total_compressed, pages, nr_pages_ret,
                                 compress_type);

                if (start + num_bytes < end) {
                        start += num_bytes;
                        pages = NULL;
                        cond_resched();
                        goto again;
                }
        } else {
cleanup_and_bail_uncompressed:
                /*
                 * No compression, but we still need to write the pages in
                 * the file we've been given so far.  redirty the locked
                 * page if it corresponds to our extent and set things up
                 * for the async work queue to run cow_file_range to do
                 * the normal delalloc dance
                 */
                if (page_offset(locked_page) >= start &&
                    page_offset(locked_page) <= end) {
                        __set_page_dirty_nobuffers(locked_page);
                        /* unlocked later on in the async handlers */
                }
                if (redirty)
                        extent_range_redirty_for_io(inode, start, end);
                add_async_extent(async_cow, start, end - start + 1,
                                 0, NULL, 0, BTRFS_COMPRESS_NONE);
                *num_added += 1;
        }

        return;

free_pages_out:
        for (i = 0; i < nr_pages_ret; i++) {
                WARN_ON(pages[i]->mapping);
                page_cache_release(pages[i]);
        }
        kfree(pages);
}

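/* drop our references on the compressed pages and free the page array */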
static void free_async_extent_pages(struct async_extent *async_extent)
{
        int i;

        if (!async_extent->pages)
                return;

        for (i = 0; i < async_extent->nr_pages; i++) {
                WARN_ON(async_extent->pages[i]->mapping);
                page_cache_release(async_extent->pages[i]);
        }
        kfree(async_extent->pages);
        async_extent->nr_pages = 0;
        async_extent->pages = NULL;
}

/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline void submit_compressed_extents(struct inode *inode,
                                              struct async_cow *async_cow)
{
        struct async_extent *async_extent;
        u64 alloc_hint = 0;
        struct btrfs_key ins;
        struct extent_map *em;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_io_tree *io_tree;
        int ret = 0;

again:
        while (!list_empty(&async_cow->extents)) {
                async_extent = list_entry(async_cow->extents.next,
                                          struct async_extent, list);
                list_del(&async_extent->list);

                io_tree = &BTRFS_I(inode)->io_tree;

retry:
                /* did the compression code fall back to uncompressed IO? */
                if (!async_extent->pages) {
                        int page_started = 0;
                        unsigned long nr_written = 0;

                        lock_extent(io_tree, async_extent->start,
                                         async_extent->start +
                                         async_extent->ram_size - 1);

                        /* allocate blocks */
                        ret = cow_file_range(inode, async_cow->locked_page,
                                             async_extent->start,
                                             async_extent->start +
                                             async_extent->ram_size - 1,
                                             &page_started, &nr_written, 0);

                        /* JDM XXX */

                        /*
                         * if page_started, cow_file_range inserted an
                         * inline extent and took care of all the unlocking
                         * and IO for us.  Otherwise, we need to submit
                         * all those pages down to the drive.
                         */
                        if (!page_started && !ret)
                                extent_write_locked_range(io_tree,
                                                  inode, async_extent->start,
                                                  async_extent->start +
                                                  async_extent->ram_size - 1,
                                                  btrfs_get_extent,
                                                  WB_SYNC_ALL);
                        else if (ret)
                                unlock_page(async_cow->locked_page);
                        kfree(async_extent);
                        cond_resched();
                        continue;
                }

                lock_extent(io_tree, async_extent->start,
                            async_extent->start + async_extent->ram_size - 1);

                ret = btrfs_reserve_extent(root,
                                           async_extent->compressed_size,
                                           async_extent->compressed_size,
                                           0, alloc_hint, &ins, 1, 1);
                if (ret) {
                        free_async_extent_pages(async_extent);

                        if (ret == -ENOSPC) {
                                unlock_extent(io_tree, async_extent->start,
                                              async_extent->start +
                                              async_extent->ram_size - 1);

                                /*
                                 * we need to redirty the pages if we decide to
                                 * fall back to uncompressed IO, otherwise we
                                 * will not submit these pages down to lower
                                 * layers.
                                 */
                                extent_range_redirty_for_io(inode,
                                                async_extent->start,
                                                async_extent->start +
                                                async_extent->ram_size - 1);

                                goto retry;
                        }
                        goto out_free;
                }
                /*
                 * here we're doing allocation and writeback of the
                 * compressed pages
                 */
                btrfs_drop_extent_cache(inode, async_extent->start,
                                        async_extent->start +
                                        async_extent->ram_size - 1, 0);

                em = alloc_extent_map();
                if (!em) {
                        ret = -ENOMEM;
                        goto out_free_reserve;
                }
                em->start = async_extent->start;
                em->len = async_extent->ram_size;
                em->orig_start = em->start;
                em->mod_start = em->start;
                em->mod_len = em->len;

                em->block_start = ins.objectid;
                em->block_len = ins.offset;
                em->orig_block_len = ins.offset;
                em->ram_bytes = async_extent->ram_size;
                em->bdev = root->fs_info->fs_devices->latest_bdev;
                em->compress_type = async_extent->compress_type;
                set_bit(EXTENT_FLAG_PINNED, &em->flags);
                set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                em->generation = -1;

                while (1) {
                        write_lock(&em_tree->lock);
                        ret = add_extent_mapping(em_tree, em, 1);
                        write_unlock(&em_tree->lock);
                        if (ret != -EEXIST) {
                                free_extent_map(em);
                                break;
                        }
                        btrfs_drop_extent_cache(inode, async_extent->start,
                                                async_extent->start +
                                                async_extent->ram_size - 1, 0);
                }

                if (ret)
                        goto out_free_reserve;

                ret = btrfs_add_ordered_extent_compress(inode,
                                                async_extent->start,
                                                ins.objectid,
                                                async_extent->ram_size,
                                                ins.offset,
                                                BTRFS_ORDERED_COMPRESSED,
                                                async_extent->compress_type);
                if (ret) {
                        btrfs_drop_extent_cache(inode, async_extent->start,
                                                async_extent->start +
                                                async_extent->ram_size - 1, 0);
                        goto out_free_reserve;
                }

                /*
                 * clear dirty, set writeback and unlock the pages.
                 */
                extent_clear_unlock_delalloc(inode, async_extent->start,
                                async_extent->start +
                                async_extent->ram_size - 1,
                                NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
                                PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
                                PAGE_SET_WRITEBACK);
                ret = btrfs_submit_compressed_write(inode,
                                    async_extent->start,
                                    async_extent->ram_size,
                                    ins.objectid,
                                    ins.offset, async_extent->pages,
                                    async_extent->nr_pages);
                if (ret) {
                        struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
                        struct page *p = async_extent->pages[0];
                        const u64 start = async_extent->start;
                        const u64 end = start + async_extent->ram_size - 1;

                        p->mapping = inode->i_mapping;
                        tree->ops->writepage_end_io_hook(p, start, end,
                                                         NULL, 0);
                        p->mapping = NULL;
                        extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
                                                     PAGE_END_WRITEBACK |
                                                     PAGE_SET_ERROR);
                        free_async_extent_pages(async_extent);
                }
                alloc_hint = ins.objectid + ins.offset;
                kfree(async_extent);
                cond_resched();
        }
        return;
out_free_reserve:
        btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
out_free:
        extent_clear_unlock_delalloc(inode, async_extent->start,
                                     async_extent->start +
                                     async_extent->ram_size - 1,
                                     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
                                     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
                                     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
                                     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
                                     PAGE_SET_ERROR);
        free_async_extent_pages(async_extent);
        kfree(async_extent);
        goto again;
}

static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
                                      u64 num_bytes)
{
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_map *em;
        u64 alloc_hint = 0;

        read_lock(&em_tree->lock);
        em = search_extent_mapping(em_tree, start, num_bytes);
        if (em) {
                /*
                 * if block start isn't an actual block number then find the
                 * first block in this inode and use that as a hint.  If that
                 * block is also bogus then just don't worry about it.
                 */
                if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
                        free_extent_map(em);
                        em = search_extent_mapping(em_tree, 0, 0);
                        if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
                                alloc_hint = em->block_start;
                        if (em)
                                free_extent_map(em);
                } else {
                        alloc_hint = em->block_start;
                        free_extent_map(em);
                }
        }
        read_unlock(&em_tree->lock);

        return alloc_hint;
}

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the callbacks end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
                                   struct page *locked_page,
                                   u64 start, u64 end, int *page_started,
                                   unsigned long *nr_written,
                                   int unlock)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        u64 alloc_hint = 0;
        u64 num_bytes;
        unsigned long ram_size;
        u64 disk_num_bytes;
        u64 cur_alloc_size;
        u64 blocksize = root->sectorsize;
        struct btrfs_key ins;
        struct extent_map *em;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        int ret = 0;

        if (btrfs_is_free_space_inode(inode)) {
                WARN_ON_ONCE(1);
                ret = -EINVAL;
                goto out_unlock;
        }

        num_bytes = ALIGN(end - start + 1, blocksize);
        num_bytes = max(blocksize,  num_bytes);
        disk_num_bytes = num_bytes;

        /* if this is a small write inside eof, kick off defrag */
        if (num_bytes < 64 * 1024 &&
            (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
                btrfs_add_inode_defrag(NULL, inode);

        if (start == 0) {
                /* let's try to make an inline extent */
                ret = cow_file_range_inline(root, inode, start, end, 0, 0,
                                            NULL);
                if (ret == 0) {
                        extent_clear_unlock_delalloc(inode, start, end, NULL,
                                     EXTENT_LOCKED | EXTENT_DELALLOC |
                                     EXTENT_DEFRAG, PAGE_UNLOCK |
                                     PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
                                     PAGE_END_WRITEBACK);

                        *nr_written = *nr_written +
                             (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
                        *page_started = 1;
                        goto out;
                } else if (ret < 0) {
                        goto out_unlock;
                }
        }

        BUG_ON(disk_num_bytes >
               btrfs_super_total_bytes(root->fs_info->super_copy));

        alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
        btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

        while (disk_num_bytes > 0) {
                unsigned long op;

                cur_alloc_size = disk_num_bytes;
                ret = btrfs_reserve_extent(root, cur_alloc_size,
                                           root->sectorsize, 0, alloc_hint,
                                           &ins, 1, 1);
                if (ret < 0)
                        goto out_unlock;

                em = alloc_extent_map();
                if (!em) {
                        ret = -ENOMEM;
                        goto out_reserve;
                }
                em->start = start;
                em->orig_start = em->start;
                ram_size = ins.offset;
                em->len = ins.offset;
                em->mod_start = em->start;
                em->mod_len = em->len;

                em->block_start = ins.objectid;
                em->block_len = ins.offset;
                em->orig_block_len = ins.offset;
                em->ram_bytes = ram_size;
                em->bdev = root->fs_info->fs_devices->latest_bdev;
                set_bit(EXTENT_FLAG_PINNED, &em->flags);
                em->generation = -1;

                while (1) {
                        write_lock(&em_tree->lock);
                        ret = add_extent_mapping(em_tree, em, 1);
                        write_unlock(&em_tree->lock);
                        if (ret != -EEXIST) {
                                free_extent_map(em);
                                break;
                        }
                        btrfs_drop_extent_cache(inode, start,
                                                start + ram_size - 1, 0);
                }
                if (ret)
                        goto out_reserve;

                cur_alloc_size = ins.offset;
                ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
                                               ram_size, cur_alloc_size, 0);
                if (ret)
                        goto out_drop_extent_cache;

                if (root->root_key.objectid ==
                    BTRFS_DATA_RELOC_TREE_OBJECTID) {
                        ret = btrfs_reloc_clone_csums(inode, start,
                                                      cur_alloc_size);
                        if (ret)
                                goto out_drop_extent_cache;
                }

                if (disk_num_bytes < cur_alloc_size)
                        break;

                /* we're not doing compressed IO, don't unlock the first
                 * page (which the caller expects to stay locked), don't
                 * clear any dirty bits and don't set any writeback bits
                 *
                 * Do set the Private2 bit so we know this page was properly
                 * set up for writepage
                 */
                op = unlock ? PAGE_UNLOCK : 0;
                op |= PAGE_SET_PRIVATE2;

                extent_clear_unlock_delalloc(inode, start,
                                             start + ram_size - 1, locked_page,
                                             EXTENT_LOCKED | EXTENT_DELALLOC,
                                             op);
                disk_num_bytes -= cur_alloc_size;
                num_bytes -= cur_alloc_size;
                alloc_hint = ins.objectid + ins.offset;
                start += cur_alloc_size;
        }
out:
        return ret;

out_drop_extent_cache:
        btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0);
out_reserve:
        btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
out_unlock:
        extent_clear_unlock_delalloc(inode, start, end, locked_page,
                                     EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
                                     EXTENT_DELALLOC | EXTENT_DEFRAG,
                                     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
                                     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK);
        goto out;
}

/*
 * work queue callback to start compression on a file's pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
        struct async_cow *async_cow;
        int num_added = 0;
        async_cow = container_of(work, struct async_cow, work);

        compress_file_range(async_cow->inode, async_cow->locked_page,
                            async_cow->start, async_cow->end, async_cow,
                            &num_added);
        if (num_added == 0) {
                btrfs_add_delayed_iput(async_cow->inode);
                async_cow->inode = NULL;
        }
}

/*
 * work queue callback to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
        struct async_cow *async_cow;
        struct btrfs_root *root;
        unsigned long nr_pages;

        async_cow = container_of(work, struct async_cow, work);

        root = async_cow->root;
        nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
                PAGE_CACHE_SHIFT;

        /*
         * atomic_sub_return implies a barrier for waitqueue_active
         */
        if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
            5 * 1024 * 1024 &&
            waitqueue_active(&root->fs_info->async_submit_wait))
                wake_up(&root->fs_info->async_submit_wait);

        if (async_cow->inode)
                submit_compressed_extents(async_cow->inode, async_cow);
}

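/*
 * final cleanup for an async_cow work item: drop the inode reference taken
 * in cow_file_range_async and free the struct
 */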
static noinline void async_cow_free(struct btrfs_work *work)
{
        struct async_cow *async_cow;
        async_cow = container_of(work, struct async_cow, work);
        if (async_cow->inode)
                btrfs_add_delayed_iput(async_cow->inode);
        kfree(async_cow);
}

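/*
 * split a delalloc range into 512k chunks (the whole range when compression
 * is off for the inode) and queue one async_cow work item per chunk on the
 * delalloc workqueue, throttling once too many async pages are in flight
 */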
static int cow_file_range_async(struct inode *inode, struct page *locked_page,
                                u64 start, u64 end, int *page_started,
                                unsigned long *nr_written)
{
        struct async_cow *async_cow;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        unsigned long nr_pages;
        u64 cur_end;
        int limit = 10 * 1024 * 1024;

        clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
                         1, 0, NULL, GFP_NOFS);
        while (start < end) {
                async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
                BUG_ON(!async_cow); /* -ENOMEM */
                async_cow->inode = igrab(inode);
                async_cow->root = root;
                async_cow->locked_page = locked_page;
                async_cow->start = start;

                if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
                    !btrfs_test_opt(root, FORCE_COMPRESS))
                        cur_end = end;
                else
                        cur_end = min(end, start + 512 * 1024 - 1);

                async_cow->end = cur_end;
                INIT_LIST_HEAD(&async_cow->extents);

                btrfs_init_work(&async_cow->work,
                                btrfs_delalloc_helper,
                                async_cow_start, async_cow_submit,
                                async_cow_free);

                nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
                        PAGE_CACHE_SHIFT;
                atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

                btrfs_queue_work(root->fs_info->delalloc_workers,
                                 &async_cow->work);

                if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
                        wait_event(root->fs_info->async_submit_wait,
                           (atomic_read(&root->fs_info->async_delalloc_pages) <
                            limit));
                }

                while (atomic_read(&root->fs_info->async_submit_draining) &&
                      atomic_read(&root->fs_info->async_delalloc_pages)) {
                        wait_event(root->fs_info->async_submit_wait,
                          (atomic_read(&root->fs_info->async_delalloc_pages) ==
                           0));
                }

                *nr_written += nr_pages;
                start = cur_end + 1;
        }
        *page_started = 1;
        return 0;
}

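/*
 * return 1 if any checksums exist for the given byte range, freeing the
 * looked-up sums as we go; return 0 if the range is checksum-free
 */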
static noinline int csum_exist_in_range(struct btrfs_root *root,
                                        u64 bytenr, u64 num_bytes)
{
        int ret;
        struct btrfs_ordered_sum *sums;
        LIST_HEAD(list);

        ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
                                       bytenr + num_bytes - 1, &list, 0);
        if (ret == 0 && list_empty(&list))
                return 0;

        while (!list_empty(&list)) {
                sums = list_entry(list.next, struct btrfs_ordered_sum, list);
                list_del(&sums->list);
                kfree(sums);
        }
        return 1;
}

/*
 * the callback for nocow writeback.  This checks for snapshots or COW
 * copies of the extents that exist in the file, and COWs the file as
 * required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
                                       struct page *locked_page,
                              u64 start, u64 end, int *page_started, int force,
                              unsigned long *nr_written)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        struct extent_buffer *leaf;
        struct btrfs_path *path;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key found_key;
        u64 cow_start;
        u64 cur_offset;
        u64 extent_end;
        u64 extent_offset;
        u64 disk_bytenr;
        u64 num_bytes;
        u64 disk_num_bytes;
        u64 ram_bytes;
        int extent_type;
        int ret, err;
        int type;
        int nocow;
        int check_prev = 1;
        bool nolock;
        u64 ino = btrfs_ino(inode);

        path = btrfs_alloc_path();
        if (!path) {
                extent_clear_unlock_delalloc(inode, start, end, locked_page,
                                             EXTENT_LOCKED | EXTENT_DELALLOC |
                                             EXTENT_DO_ACCOUNTING |
                                             EXTENT_DEFRAG, PAGE_UNLOCK |
                                             PAGE_CLEAR_DIRTY |
                                             PAGE_SET_WRITEBACK |
                                             PAGE_END_WRITEBACK);
                return -ENOMEM;
        }

        nolock = btrfs_is_free_space_inode(inode);

        if (nolock)
                trans = btrfs_join_transaction_nolock(root);
        else
                trans = btrfs_join_transaction(root);

        if (IS_ERR(trans)) {
                extent_clear_unlock_delalloc(inode, start, end, locked_page,
                                             EXTENT_LOCKED | EXTENT_DELALLOC |
                                             EXTENT_DO_ACCOUNTING |
                                             EXTENT_DEFRAG, PAGE_UNLOCK |
                                             PAGE_CLEAR_DIRTY |
                                             PAGE_SET_WRITEBACK |
                                             PAGE_END_WRITEBACK);
                btrfs_free_path(path);
                return PTR_ERR(trans);
        }

        trans->block_rsv = &root->fs_info->delalloc_block_rsv;

        cow_start = (u64)-1;
        cur_offset = start;
        while (1) {
                ret = btrfs_lookup_file_extent(trans, root, path, ino,
                                               cur_offset, 0);
                if (ret < 0)
                        goto error;
                if (ret > 0 && path->slots[0] > 0 && check_prev) {
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0] - 1);
                        if (found_key.objectid == ino &&
                            found_key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }
                check_prev = 0;
next_slot:
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                goto error;
                        if (ret > 0)
                                break;
                        leaf = path->nodes[0];
                }

                nocow = 0;
                disk_bytenr = 0;
                num_bytes = 0;
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

                if (found_key.objectid > ino ||
                    found_key.type > BTRFS_EXTENT_DATA_KEY ||
                    found_key.offset > end)
                        break;

                if (found_key.offset > cur_offset) {
                        extent_end = found_key.offset;
                        extent_type = 0;
                        goto out_check;
                }

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);
                extent_type = btrfs_file_extent_type(leaf, fi);

                ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
                if (extent_type == BTRFS_FILE_EXTENT_REG ||
                    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
                        disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
                        extent_offset = btrfs_file_extent_offset(leaf, fi);
                        extent_end = found_key.offset +
                                btrfs_file_extent_num_bytes(leaf, fi);
                        disk_num_bytes =
                                btrfs_file_extent_disk_num_bytes(leaf, fi);
                        if (extent_end <= start) {
                                path->slots[0]++;
                                goto next_slot;
                        }
                        if (disk_bytenr == 0)
                                goto out_check;
                        if (btrfs_file_extent_compression(leaf, fi) ||
                            btrfs_file_extent_encryption(leaf, fi) ||
                            btrfs_file_extent_other_encoding(leaf, fi))
                                goto out_check;
                        if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
                                goto out_check;
                        if (btrfs_extent_readonly(root, disk_bytenr))
                                goto out_check;
                        if (btrfs_cross_ref_exist(trans, root, ino,
                                                  found_key.offset -
                                                  extent_offset, disk_bytenr))
                                goto out_check;
                        disk_bytenr += extent_offset;
                        disk_bytenr += cur_offset - found_key.offset;
                        num_bytes = min(end + 1, extent_end) - cur_offset;
1345                         /*
1346                          * if there are pending snapshots for this root,
1347                          * we fall back to the common COW path.
1348                          */
1349                         if (!nolock) {
1350                                 err = btrfs_start_write_no_snapshoting(root);
1351                                 if (!err)
1352                                         goto out_check;
1353                         }
1354                         /*
1355                          * Force COW if csums exist in the range. This
1356                          * ensures that the csums for a given extent are
1357                          * either valid or do not exist.
1358                          */
1359                         if (csum_exist_in_range(root, disk_bytenr, num_bytes))
1360                                 goto out_check;
1361                         nocow = 1;
1362                 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1363                         extent_end = found_key.offset +
1364                                 btrfs_file_extent_inline_len(leaf,
1365                                                      path->slots[0], fi);
1366                         extent_end = ALIGN(extent_end, root->sectorsize);
1367                 } else {
1368                         BUG_ON(1);
1369                 }
1370 out_check:
1371                 if (extent_end <= start) {
1372                         path->slots[0]++;
1373                         if (!nolock && nocow)
1374                                 btrfs_end_write_no_snapshoting(root);
1375                         goto next_slot;
1376                 }
1377                 if (!nocow) {
1378                         if (cow_start == (u64)-1)
1379                                 cow_start = cur_offset;
1380                         cur_offset = extent_end;
1381                         if (cur_offset > end)
1382                                 break;
1383                         path->slots[0]++;
1384                         goto next_slot;
1385                 }
1386
1387                 btrfs_release_path(path);
1388                 if (cow_start != (u64)-1) {
1389                         ret = cow_file_range(inode, locked_page,
1390                                              cow_start, found_key.offset - 1,
1391                                              page_started, nr_written, 1);
1392                         if (ret) {
1393                                 if (!nolock && nocow)
1394                                         btrfs_end_write_no_snapshoting(root);
1395                                 goto error;
1396                         }
1397                         cow_start = (u64)-1;
1398                 }
1399
1400                 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1401                         struct extent_map *em;
1402                         struct extent_map_tree *em_tree;
1403                         em_tree = &BTRFS_I(inode)->extent_tree;
1404                         em = alloc_extent_map();
1405                         BUG_ON(!em); /* -ENOMEM */
1406                         em->start = cur_offset;
1407                         em->orig_start = found_key.offset - extent_offset;
1408                         em->len = num_bytes;
1409                         em->block_len = num_bytes;
1410                         em->block_start = disk_bytenr;
1411                         em->orig_block_len = disk_num_bytes;
1412                         em->ram_bytes = ram_bytes;
1413                         em->bdev = root->fs_info->fs_devices->latest_bdev;
1414                         em->mod_start = em->start;
1415                         em->mod_len = em->len;
1416                         set_bit(EXTENT_FLAG_PINNED, &em->flags);
1417                         set_bit(EXTENT_FLAG_FILLING, &em->flags);
1418                         em->generation = -1;
1419                         while (1) {
1420                                 write_lock(&em_tree->lock);
1421                                 ret = add_extent_mapping(em_tree, em, 1);
1422                                 write_unlock(&em_tree->lock);
1423                                 if (ret != -EEXIST) {
1424                                         free_extent_map(em);
1425                                         break;
1426                                 }
1427                                 btrfs_drop_extent_cache(inode, em->start,
1428                                                 em->start + em->len - 1, 0);
1429                         }
1430                         type = BTRFS_ORDERED_PREALLOC;
1431                 } else {
1432                         type = BTRFS_ORDERED_NOCOW;
1433                 }
1434
1435                 ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1436                                                num_bytes, num_bytes, type);
1437                 BUG_ON(ret); /* -ENOMEM */
1438
1439                 if (root->root_key.objectid ==
1440                     BTRFS_DATA_RELOC_TREE_OBJECTID) {
1441                         ret = btrfs_reloc_clone_csums(inode, cur_offset,
1442                                                       num_bytes);
1443                         if (ret) {
1444                                 if (!nolock && nocow)
1445                                         btrfs_end_write_no_snapshoting(root);
1446                                 goto error;
1447                         }
1448                 }
1449
1450                 extent_clear_unlock_delalloc(inode, cur_offset,
1451                                              cur_offset + num_bytes - 1,
1452                                              locked_page, EXTENT_LOCKED |
1453                                              EXTENT_DELALLOC, PAGE_UNLOCK |
1454                                              PAGE_SET_PRIVATE2);
1455                 if (!nolock && nocow)
1456                         btrfs_end_write_no_snapshoting(root);
1457                 cur_offset = extent_end;
1458                 if (cur_offset > end)
1459                         break;
1460         }
1461         btrfs_release_path(path);
1462
1463         if (cur_offset <= end && cow_start == (u64)-1) {
1464                 cow_start = cur_offset;
1465                 cur_offset = end;
1466         }
1467
1468         if (cow_start != (u64)-1) {
1469                 ret = cow_file_range(inode, locked_page, cow_start, end,
1470                                      page_started, nr_written, 1);
1471                 if (ret)
1472                         goto error;
1473         }
1474
1475 error:
1476         err = btrfs_end_transaction(trans, root);
1477         if (!ret)
1478                 ret = err;
1479
1480         if (ret && cur_offset < end)
1481                 extent_clear_unlock_delalloc(inode, cur_offset, end,
1482                                              locked_page, EXTENT_LOCKED |
1483                                              EXTENT_DELALLOC | EXTENT_DEFRAG |
1484                                              EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
1485                                              PAGE_CLEAR_DIRTY |
1486                                              PAGE_SET_WRITEBACK |
1487                                              PAGE_END_WRITEBACK);
1488         btrfs_free_path(path);
1489         return ret;
1490 }
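
/*
 * A condensed sketch of the per-extent nocow checks above (illustrative
 * only, not a helper that exists in this file).  Only helpers already
 * used by run_delalloc_nocow() appear here; the checks that need a
 * transaction handle (cross refs, pending snapshots) are omitted.
 */
static inline bool nocow_candidate_sketch(struct btrfs_root *root,
					  struct extent_buffer *leaf,
					  struct btrfs_file_extent_item *fi,
					  u64 disk_bytenr, u64 num_bytes,
					  int force)
{
	if (disk_bytenr == 0)
		return false;	/* hole or unwritten extent */
	if (btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return false;	/* encoded extents must be COWed */
	if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_REG && !force)
		return false;	/* plain REG extents need NODATACOW */
	if (btrfs_extent_readonly(root, disk_bytenr))
		return false;	/* backed by a read-only block group */
	return !csum_exist_in_range(root, disk_bytenr, num_bytes);
}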
1491
1492 static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
1493 {
1494
1495         if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
1496             !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC))
1497                 return 0;
1498
1499         /*
1500          * @defrag_bytes is only a hint and is read without a spinlock
1501          * held; if it is non-zero, the file is being defragged.
1502          * Force COW if the given extent needs to be defragged.
1503          */
1504         if (BTRFS_I(inode)->defrag_bytes &&
1505             test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
1506                            EXTENT_DEFRAG, 0, NULL))
1507                 return 1;
1508
1509         return 0;
1510 }
1511
1512 /*
1513  * extent_io.c call back to do delayed allocation processing
1514  */
1515 static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1516                               u64 start, u64 end, int *page_started,
1517                               unsigned long *nr_written)
1518 {
1519         int ret;
1520         int force_cow = need_force_cow(inode, start, end);
1521
1522         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) {
1523                 ret = run_delalloc_nocow(inode, locked_page, start, end,
1524                                          page_started, 1, nr_written);
1525         } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
1526                 ret = run_delalloc_nocow(inode, locked_page, start, end,
1527                                          page_started, 0, nr_written);
1528         } else if (!inode_need_compress(inode)) {
1529                 ret = cow_file_range(inode, locked_page, start, end,
1530                                       page_started, nr_written, 1);
1531         } else {
1532                 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1533                         &BTRFS_I(inode)->runtime_flags);
1534                 ret = cow_file_range_async(inode, locked_page, start, end,
1535                                            page_started, nr_written);
1536         }
1537         return ret;
1538 }
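
/*
 * Dispatch summary for the hook above (illustrative):
 *
 *   NODATACOW inode, no forced COW -> run_delalloc_nocow(force = 1)
 *                                     (REG and PREALLOC extents may be
 *                                      reused in place)
 *   PREALLOC flag,   no forced COW -> run_delalloc_nocow(force = 0)
 *                                     (only PREALLOC extents may be reused)
 *   compression not wanted         -> cow_file_range()
 *   otherwise                      -> cow_file_range_async(), which hands
 *                                     the range to the compression workers
 */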
1539
1540 static void btrfs_split_extent_hook(struct inode *inode,
1541                                     struct extent_state *orig, u64 split)
1542 {
1543         u64 size;
1544
1545         /* not delalloc, ignore it */
1546         if (!(orig->state & EXTENT_DELALLOC))
1547                 return;
1548
1549         size = orig->end - orig->start + 1;
1550         if (size > BTRFS_MAX_EXTENT_SIZE) {
1551                 u64 num_extents;
1552                 u64 new_size;
1553
1554                 /*
1555                  * See the explanation in btrfs_merge_extent_hook; the same
1556                  * applies here, just in reverse.
1557                  */
1558                 new_size = orig->end - split + 1;
1559                 num_extents = div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
1560                                         BTRFS_MAX_EXTENT_SIZE);
1561                 new_size = split - orig->start;
1562                 num_extents += div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
1563                                         BTRFS_MAX_EXTENT_SIZE);
1564                 if (div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1,
1565                               BTRFS_MAX_EXTENT_SIZE) >= num_extents)
1566                         return;
1567         }
1568
1569         spin_lock(&BTRFS_I(inode)->lock);
1570         BTRFS_I(inode)->outstanding_extents++;
1571         spin_unlock(&BTRFS_I(inode)->lock);
1572 }
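
/*
 * A minimal sketch (not a helper that exists in this file) of the ceiling
 * division both the split and merge hooks use to turn a byte length into
 * a count of outstanding extents.  Assuming BTRFS_MAX_EXTENT_SIZE is 128M
 * as in this tree, a 128M + 4K range counts as two extents.
 */
static inline u64 delalloc_extent_count_sketch(u64 len)
{
	return div64_u64(len + BTRFS_MAX_EXTENT_SIZE - 1,
			 BTRFS_MAX_EXTENT_SIZE);
}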
1573
1574 /*
1575  * extent_io.c merge_extent_hook, used to track merged delayed allocation
1576  * extents so we can keep track of new extents that are just merged onto old
1577  * extents, such as when we are doing sequential writes, so we can properly
1578  * account for the metadata space we'll need.
1579  */
1580 static void btrfs_merge_extent_hook(struct inode *inode,
1581                                     struct extent_state *new,
1582                                     struct extent_state *other)
1583 {
1584         u64 new_size, old_size;
1585         u64 num_extents;
1586
1587         /* not delalloc, ignore it */
1588         if (!(other->state & EXTENT_DELALLOC))
1589                 return;
1590
1591         if (new->start > other->start)
1592                 new_size = new->end - other->start + 1;
1593         else
1594                 new_size = other->end - new->start + 1;
1595
1596         /* we're not bigger than the max, unreserve the space and go */
1597         if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
1598                 spin_lock(&BTRFS_I(inode)->lock);
1599                 BTRFS_I(inode)->outstanding_extents--;
1600                 spin_unlock(&BTRFS_I(inode)->lock);
1601                 return;
1602         }
1603
1604         /*
1605          * We have to add up either side to figure out how many extents were
1606          * accounted for before we merged into one big extent.  If the number of
1607          * extents we accounted for is <= the amount we need for the new range
1608          * then we can return, otherwise drop.  Think of it like this
1609          *
1610          * [ 4k][MAX_SIZE]
1611          *
1612          * So we've grown the extent by a MAX_SIZE extent, this would mean we
1613          * need 2 outstanding extents, on one side we have 1 and the other side
1614          * we have 1 so they are == and we can return.  But in this case
1615          *
1616          * [MAX_SIZE+4k][MAX_SIZE+4k]
1617          *
1618          * Each range on their own accounts for 2 extents, but merged together
1619          * they are only 3 extents worth of accounting, so we need to drop in
1620          * this case.
1621          */
1622         old_size = other->end - other->start + 1;
1623         num_extents = div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1,
1624                                 BTRFS_MAX_EXTENT_SIZE);
1625         old_size = new->end - new->start + 1;
1626         num_extents += div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1,
1627                                  BTRFS_MAX_EXTENT_SIZE);
1628
1629         if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
1630                       BTRFS_MAX_EXTENT_SIZE) >= num_extents)
1631                 return;
1632
1633         spin_lock(&BTRFS_I(inode)->lock);
1634         BTRFS_I(inode)->outstanding_extents--;
1635         spin_unlock(&BTRFS_I(inode)->lock);
1636 }
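
/*
 * Worked example for the merge accounting above: merging
 * [MAX_SIZE+4k][MAX_SIZE+4k] has 2 + 2 = 4 extents accounted, while the
 * merged range only needs ceil((2*MAX_SIZE+8k)/MAX_SIZE) = 3, so one
 * outstanding extent is dropped.  Merging [4k][MAX_SIZE] has 1 + 1 = 2
 * accounted and the merged range still needs 2, so nothing is dropped.
 */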
1637
1638 static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
1639                                       struct inode *inode)
1640 {
1641         spin_lock(&root->delalloc_lock);
1642         if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1643                 list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1644                               &root->delalloc_inodes);
1645                 set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1646                         &BTRFS_I(inode)->runtime_flags);
1647                 root->nr_delalloc_inodes++;
1648                 if (root->nr_delalloc_inodes == 1) {
1649                         spin_lock(&root->fs_info->delalloc_root_lock);
1650                         BUG_ON(!list_empty(&root->delalloc_root));
1651                         list_add_tail(&root->delalloc_root,
1652                                       &root->fs_info->delalloc_roots);
1653                         spin_unlock(&root->fs_info->delalloc_root_lock);
1654                 }
1655         }
1656         spin_unlock(&root->delalloc_lock);
1657 }
1658
1659 static void btrfs_del_delalloc_inode(struct btrfs_root *root,
1660                                      struct inode *inode)
1661 {
1662         spin_lock(&root->delalloc_lock);
1663         if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1664                 list_del_init(&BTRFS_I(inode)->delalloc_inodes);
1665                 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1666                           &BTRFS_I(inode)->runtime_flags);
1667                 root->nr_delalloc_inodes--;
1668                 if (!root->nr_delalloc_inodes) {
1669                         spin_lock(&root->fs_info->delalloc_root_lock);
1670                         BUG_ON(list_empty(&root->delalloc_root));
1671                         list_del_init(&root->delalloc_root);
1672                         spin_unlock(&root->fs_info->delalloc_root_lock);
1673                 }
1674         }
1675         spin_unlock(&root->delalloc_lock);
1676 }
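
/*
 * A minimal sketch of how the per-root list maintained above can be
 * walked (illustrative only; real flushers splice the list under the
 * lock instead of iterating in place).
 */
static inline void for_each_delalloc_inode_sketch(struct btrfs_root *root)
{
	struct btrfs_inode *entry;

	spin_lock(&root->delalloc_lock);
	list_for_each_entry(entry, &root->delalloc_inodes, delalloc_inodes) {
		/* every entry here still has outstanding delalloc bytes */
	}
	spin_unlock(&root->delalloc_lock);
}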
1677
1678 /*
1679  * extent_io.c set_bit_hook, used to track delayed allocation
1680  * bytes in this file, and to maintain the list of inodes that
1681  * have pending delalloc work to be done.
1682  */
1683 static void btrfs_set_bit_hook(struct inode *inode,
1684                                struct extent_state *state, unsigned *bits)
1685 {
1686
1687         if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
1688                 WARN_ON(1);
1689         /*
1690          * set_bit and clear_bit hooks normally require _irqsave/restore,
1691          * but in this case we are only testing for the DELALLOC
1692          * bit, which is only set or cleared with irqs on.
1693          */
1694         if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1695                 struct btrfs_root *root = BTRFS_I(inode)->root;
1696                 u64 len = state->end + 1 - state->start;
1697                 bool do_list = !btrfs_is_free_space_inode(inode);
1698
1699                 if (*bits & EXTENT_FIRST_DELALLOC) {
1700                         *bits &= ~EXTENT_FIRST_DELALLOC;
1701                 } else {
1702                         spin_lock(&BTRFS_I(inode)->lock);
1703                         BTRFS_I(inode)->outstanding_extents++;
1704                         spin_unlock(&BTRFS_I(inode)->lock);
1705                 }
1706
1707                 /* For sanity tests */
1708                 if (btrfs_test_is_dummy_root(root))
1709                         return;
1710
1711                 __percpu_counter_add(&root->fs_info->delalloc_bytes, len,
1712                                      root->fs_info->delalloc_batch);
1713                 spin_lock(&BTRFS_I(inode)->lock);
1714                 BTRFS_I(inode)->delalloc_bytes += len;
1715                 if (*bits & EXTENT_DEFRAG)
1716                         BTRFS_I(inode)->defrag_bytes += len;
1717                 if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1718                                          &BTRFS_I(inode)->runtime_flags))
1719                         btrfs_add_delalloc_inodes(root, inode);
1720                 spin_unlock(&BTRFS_I(inode)->lock);
1721         }
1722 }
1723
1724 /*
1725  * extent_io.c clear_bit_hook, see set_bit_hook for why
1726  */
1727 static void btrfs_clear_bit_hook(struct inode *inode,
1728                                  struct extent_state *state,
1729                                  unsigned *bits)
1730 {
1731         u64 len = state->end + 1 - state->start;
1732         u64 num_extents = div64_u64(len + BTRFS_MAX_EXTENT_SIZE - 1,
1733                                     BTRFS_MAX_EXTENT_SIZE);
1734
1735         spin_lock(&BTRFS_I(inode)->lock);
1736         if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG))
1737                 BTRFS_I(inode)->defrag_bytes -= len;
1738         spin_unlock(&BTRFS_I(inode)->lock);
1739
1740         /*
1741          * set_bit and clear_bit hooks normally require _irqsave/restore,
1742          * but in this case we are only testing for the DELALLOC
1743          * bit, which is only set or cleared with irqs on.
1744          */
1745         if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1746                 struct btrfs_root *root = BTRFS_I(inode)->root;
1747                 bool do_list = !btrfs_is_free_space_inode(inode);
1748
1749                 if (*bits & EXTENT_FIRST_DELALLOC) {
1750                         *bits &= ~EXTENT_FIRST_DELALLOC;
1751                 } else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
1752                         spin_lock(&BTRFS_I(inode)->lock);
1753                         BTRFS_I(inode)->outstanding_extents -= num_extents;
1754                         spin_unlock(&BTRFS_I(inode)->lock);
1755                 }
1756
1757                 /*
1758                  * We don't reserve metadata space for space cache inodes so we
1759                  * don't need to call delalloc_release_metadata if there is an
1760                  * error.
1761                  */
1762                 if (*bits & EXTENT_DO_ACCOUNTING &&
1763                     root != root->fs_info->tree_root)
1764                         btrfs_delalloc_release_metadata(inode, len);
1765
1766                 /* For sanity tests. */
1767                 if (btrfs_test_is_dummy_root(root))
1768                         return;
1769
1770                 if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
1771                     && do_list && !(state->state & EXTENT_NORESERVE))
1772                         btrfs_free_reserved_data_space(inode, len);
1773
1774                 __percpu_counter_add(&root->fs_info->delalloc_bytes, -len,
1775                                      root->fs_info->delalloc_batch);
1776                 spin_lock(&BTRFS_I(inode)->lock);
1777                 BTRFS_I(inode)->delalloc_bytes -= len;
1778                 if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
1779                     test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1780                              &BTRFS_I(inode)->runtime_flags))
1781                         btrfs_del_delalloc_inode(root, inode);
1782                 spin_unlock(&BTRFS_I(inode)->lock);
1783         }
1784 }
1785
1786 /*
1787  * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
1788  * we don't create bios that span stripes or chunks
1789  */
1790 int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
1791                          size_t size, struct bio *bio,
1792                          unsigned long bio_flags)
1793 {
1794         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1795         u64 logical = (u64)bio->bi_iter.bi_sector << 9;
1796         u64 length = 0;
1797         u64 map_length;
1798         int ret;
1799
1800         if (bio_flags & EXTENT_BIO_COMPRESSED)
1801                 return 0;
1802
1803         length = bio->bi_iter.bi_size;
1804         map_length = length;
1805         ret = btrfs_map_block(root->fs_info, rw, logical,
1806                               &map_length, NULL, 0);
1807         /* Will always return 0 with map_multi == NULL */
1808         BUG_ON(ret < 0);
1809         if (map_length < length + size)
1810                 return 1;
1811         return 0;
1812 }
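
/*
 * Example: if btrfs_map_block() reports that only length + 4K contiguous
 * bytes remain before the current stripe ends, a 4K page still merges
 * (map_length == length + size) but an 8K one does not
 * (map_length < length + size), so the hook returns 1 and the caller
 * starts a new bio at the stripe boundary.
 */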
1813
1814 /*
1815  * in order to insert checksums into the metadata in large chunks,
1816  * we wait until bio submission time.  All the pages in the bio are
1817  * checksummed and sums are attached onto the ordered extent record.
1818  *
1819  * At IO completion time the csums attached on the ordered extent record
1820  * are inserted into the btree.
1821  */
1822 static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1823                                     struct bio *bio, int mirror_num,
1824                                     unsigned long bio_flags,
1825                                     u64 bio_offset)
1826 {
1827         struct btrfs_root *root = BTRFS_I(inode)->root;
1828         int ret = 0;
1829
1830         ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1831         BUG_ON(ret); /* -ENOMEM */
1832         return 0;
1833 }
1834
1835 /*
1836  * in order to insert checksums into the metadata in large chunks,
1837  * we wait until bio submission time.  All the pages in the bio are
1838  * checksummed and sums are attached onto the ordered extent record.
1839  *
1840  * At IO completion time the csums attached on the ordered extent record
1841  * are inserted into the btree.
1842  */
1843 static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1844                           int mirror_num, unsigned long bio_flags,
1845                           u64 bio_offset)
1846 {
1847         struct btrfs_root *root = BTRFS_I(inode)->root;
1848         int ret;
1849
1850         ret = btrfs_map_bio(root, rw, bio, mirror_num, 1);
1851         if (ret) {
1852                 bio->bi_error = ret;
1853                 bio_endio(bio);
1854         }
1855         return ret;
1856 }
1857
1858 /*
1859  * extent_io.c submission hook. This does the right thing for csum calculation
1860  * on write, or reading the csums from the tree before a read
1861  */
1862 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1863                           int mirror_num, unsigned long bio_flags,
1864                           u64 bio_offset)
1865 {
1866         struct btrfs_root *root = BTRFS_I(inode)->root;
1867         enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
1868         int ret = 0;
1869         int skip_sum;
1870         int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
1871
1872         skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1873
1874         if (btrfs_is_free_space_inode(inode))
1875                 metadata = BTRFS_WQ_ENDIO_FREE_SPACE;
1876
1877         if (!(rw & REQ_WRITE)) {
1878                 ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
1879                 if (ret)
1880                         goto out;
1881
1882                 if (bio_flags & EXTENT_BIO_COMPRESSED) {
1883                         ret = btrfs_submit_compressed_read(inode, bio,
1884                                                            mirror_num,
1885                                                            bio_flags);
1886                         goto out;
1887                 } else if (!skip_sum) {
1888                         ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
1889                         if (ret)
1890                                 goto out;
1891                 }
1892                 goto mapit;
1893         } else if (async && !skip_sum) {
1894                 /* csum items have already been cloned */
1895                 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1896                         goto mapit;
1897                 /* we're doing a write, do the async checksumming */
1898                 ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
1899                                    inode, rw, bio, mirror_num,
1900                                    bio_flags, bio_offset,
1901                                    __btrfs_submit_bio_start,
1902                                    __btrfs_submit_bio_done);
1903                 goto out;
1904         } else if (!skip_sum) {
1905                 ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1906                 if (ret)
1907                         goto out;
1908         }
1909
1910 mapit:
1911         ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
1912
1913 out:
1914         if (ret < 0) {
1915                 bio->bi_error = ret;
1916                 bio_endio(bio);
1917         }
1918         return ret;
1919 }
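
/*
 * Decision summary for the hook above (illustrative):
 *
 *   read,  compressed        -> btrfs_submit_compressed_read()
 *   read,  csums present     -> btrfs_lookup_bio_sums(), then map
 *   write, async, need csums -> btrfs_wq_submit_bio() (csum in a worker
 *                               via __btrfs_submit_bio_start/done)
 *   write, sync,  need csums -> btrfs_csum_one_bio(), then map
 *   anything else            -> btrfs_map_bio() directly
 */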
1920
1921 /*
1922  * given a list of ordered sums record them in the inode.  This happens
1923  * at IO completion time based on sums calculated at bio submission time.
1924  */
1925 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1926                              struct inode *inode, u64 file_offset,
1927                              struct list_head *list)
1928 {
1929         struct btrfs_ordered_sum *sum;
1930
1931         list_for_each_entry(sum, list, list) {
1932                 trans->adding_csums = 1;
1933                 btrfs_csum_file_blocks(trans,
1934                        BTRFS_I(inode)->root->fs_info->csum_root, sum);
1935                 trans->adding_csums = 0;
1936         }
1937         return 0;
1938 }
1939
1940 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
1941                               struct extent_state **cached_state)
1942 {
1943         WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0);
1944         return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1945                                    cached_state, GFP_NOFS);
1946 }
1947
1948 /* see btrfs_writepage_start_hook for details on why this is required */
1949 struct btrfs_writepage_fixup {
1950         struct page *page;
1951         struct btrfs_work work;
1952 };
1953
1954 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1955 {
1956         struct btrfs_writepage_fixup *fixup;
1957         struct btrfs_ordered_extent *ordered;
1958         struct extent_state *cached_state = NULL;
1959         struct page *page;
1960         struct inode *inode;
1961         u64 page_start;
1962         u64 page_end;
1963         int ret;
1964
1965         fixup = container_of(work, struct btrfs_writepage_fixup, work);
1966         page = fixup->page;
1967 again:
1968         lock_page(page);
1969         if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
1970                 ClearPageChecked(page);
1971                 goto out_page;
1972         }
1973
1974         inode = page->mapping->host;
1975         page_start = page_offset(page);
1976         page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1977
1978         lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
1979                          &cached_state);
1980
1981         /* already ordered? We're done */
1982         if (PagePrivate2(page))
1983                 goto out;
1984
1985         ordered = btrfs_lookup_ordered_extent(inode, page_start);
1986         if (ordered) {
1987                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
1988                                      page_end, &cached_state, GFP_NOFS);
1989                 unlock_page(page);
1990                 btrfs_start_ordered_extent(inode, ordered, 1);
1991                 btrfs_put_ordered_extent(ordered);
1992                 goto again;
1993         }
1994
1995         ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
1996         if (ret) {
1997                 mapping_set_error(page->mapping, ret);
1998                 end_extent_writepage(page, ret, page_start, page_end);
1999                 ClearPageChecked(page);
2000                 goto out;
2001         }
2002
2003         btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
2004         ClearPageChecked(page);
2005         set_page_dirty(page);
2006 out:
2007         unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
2008                              &cached_state, GFP_NOFS);
2009 out_page:
2010         unlock_page(page);
2011         page_cache_release(page);
2012         kfree(fixup);
2013 }
2014
2015 /*
2016  * There are a few paths in the higher layers of the kernel that directly
2017  * set the page dirty bit without asking the filesystem if it is a
2018  * good idea.  This causes problems because we want to make sure COW
2019  * properly happens and the data=ordered rules are followed.
2020  *
2021  * In our case any range that doesn't have the ORDERED bit set
2022  * hasn't been properly set up for IO.  We kick off an async process
2023  * to fix it up.  The async helper will wait for ordered extents, set
2024  * the delalloc bit and make it safe to write the page.
2025  */
2026 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
2027 {
2028         struct inode *inode = page->mapping->host;
2029         struct btrfs_writepage_fixup *fixup;
2030         struct btrfs_root *root = BTRFS_I(inode)->root;
2031
2032         /* this page is properly in the ordered list */
2033         if (TestClearPagePrivate2(page))
2034                 return 0;
2035
2036         if (PageChecked(page))
2037                 return -EAGAIN;
2038
2039         fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
2040         if (!fixup)
2041                 return -EAGAIN;
2042
2043         SetPageChecked(page);
2044         page_cache_get(page);
2045         btrfs_init_work(&fixup->work, btrfs_fixup_helper,
2046                         btrfs_writepage_fixup_worker, NULL, NULL);
2047         fixup->page = page;
2048         btrfs_queue_work(root->fs_info->fixup_workers, &fixup->work);
2049         return -EBUSY;
2050 }
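
/*
 * Life cycle of a fixup (tracing the two functions above): writepage
 * finds a dirty page without PagePrivate2 -> the start hook sets
 * PageChecked, grabs a page reference and queues the worker, returning
 * -EBUSY so the page is skipped -> the worker re-locks the page, waits
 * out any ordered extent, reserves delalloc space and redirties the
 * page -> the next writeback pass sees Private2 and writes it normally.
 */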
2051
2052 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
2053                                        struct inode *inode, u64 file_pos,
2054                                        u64 disk_bytenr, u64 disk_num_bytes,
2055                                        u64 num_bytes, u64 ram_bytes,
2056                                        u8 compression, u8 encryption,
2057                                        u16 other_encoding, int extent_type)
2058 {
2059         struct btrfs_root *root = BTRFS_I(inode)->root;
2060         struct btrfs_file_extent_item *fi;
2061         struct btrfs_path *path;
2062         struct extent_buffer *leaf;
2063         struct btrfs_key ins;
2064         int extent_inserted = 0;
2065         int ret;
2066
2067         path = btrfs_alloc_path();
2068         if (!path)
2069                 return -ENOMEM;
2070
2071         /*
2072          * we may be replacing one extent in the tree with another.
2073          * The new extent is pinned in the extent map, and we don't want
2074          * to drop it from the cache until it is completely in the btree.
2075          *
2076          * So, tell btrfs_drop_extents to leave this extent in the cache.
2077          * The caller is expected to unpin it and allow it to be merged
2078          * with the others.
2079          */
2080         ret = __btrfs_drop_extents(trans, root, inode, path, file_pos,
2081                                    file_pos + num_bytes, NULL, 0,
2082                                    1, sizeof(*fi), &extent_inserted);
2083         if (ret)
2084                 goto out;
2085
2086         if (!extent_inserted) {
2087                 ins.objectid = btrfs_ino(inode);
2088                 ins.offset = file_pos;
2089                 ins.type = BTRFS_EXTENT_DATA_KEY;
2090
2091                 path->leave_spinning = 1;
2092                 ret = btrfs_insert_empty_item(trans, root, path, &ins,
2093                                               sizeof(*fi));
2094                 if (ret)
2095                         goto out;
2096         }
2097         leaf = path->nodes[0];
2098         fi = btrfs_item_ptr(leaf, path->slots[0],
2099                             struct btrfs_file_extent_item);
2100         btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2101         btrfs_set_file_extent_type(leaf, fi, extent_type);
2102         btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
2103         btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
2104         btrfs_set_file_extent_offset(leaf, fi, 0);
2105         btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2106         btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
2107         btrfs_set_file_extent_compression(leaf, fi, compression);
2108         btrfs_set_file_extent_encryption(leaf, fi, encryption);
2109         btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
2110
2111         btrfs_mark_buffer_dirty(leaf);
2112         btrfs_release_path(path);
2113
2114         inode_add_bytes(inode, num_bytes);
2115
2116         ins.objectid = disk_bytenr;
2117         ins.offset = disk_num_bytes;
2118         ins.type = BTRFS_EXTENT_ITEM_KEY;
2119         ret = btrfs_alloc_reserved_file_extent(trans, root,
2120                                         root->root_key.objectid,
2121                                         btrfs_ino(inode), file_pos, &ins);
2122 out:
2123         btrfs_free_path(path);
2124
2125         return ret;
2126 }
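
/*
 * Example (illustrative values): an uncompressed 1M ordered extent
 * written at file offset 4M, with blocks reserved at bytenr B, would be
 * recorded as
 *
 *   insert_reserved_file_extent(trans, inode, 4M, B, 1M, 1M, 1M,
 *                               0, 0, 0, BTRFS_FILE_EXTENT_REG);
 *
 * i.e. disk_num_bytes == num_bytes == ram_bytes whenever no compression
 * or other encoding is involved.
 */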
2127
2128 /* snapshot-aware defrag */
2129 struct sa_defrag_extent_backref {
2130         struct rb_node node;
2131         struct old_sa_defrag_extent *old;
2132         u64 root_id;
2133         u64 inum;
2134         u64 file_pos;
2135         u64 extent_offset;
2136         u64 num_bytes;
2137         u64 generation;
2138 };
2139
2140 struct old_sa_defrag_extent {
2141         struct list_head list;
2142         struct new_sa_defrag_extent *new;
2143
2144         u64 extent_offset;
2145         u64 bytenr;
2146         u64 offset;
2147         u64 len;
2148         int count;
2149 };
2150
2151 struct new_sa_defrag_extent {
2152         struct rb_root root;
2153         struct list_head head;
2154         struct btrfs_path *path;
2155         struct inode *inode;
2156         u64 file_pos;
2157         u64 len;
2158         u64 bytenr;
2159         u64 disk_len;
2160         u8 compress_type;
2161 };
2162
2163 static int backref_comp(struct sa_defrag_extent_backref *b1,
2164                         struct sa_defrag_extent_backref *b2)
2165 {
2166         if (b1->root_id < b2->root_id)
2167                 return -1;
2168         else if (b1->root_id > b2->root_id)
2169                 return 1;
2170
2171         if (b1->inum < b2->inum)
2172                 return -1;
2173         else if (b1->inum > b2->inum)
2174                 return 1;
2175
2176         if (b1->file_pos < b2->file_pos)
2177                 return -1;
2178         else if (b1->file_pos > b2->file_pos)
2179                 return 1;
2180
2181         /*
2182          * [------------------------------] ===> (a range of space)
2183          *     |<--->|   |<---->| =============> (fs/file tree A)
2184          * |<---------------------------->| ===> (fs/file tree B)
2185          *
2186          * A range of space can refer to two file extents in one tree
2187          * while referring to only one file extent in another tree.
2188          *
2189          * So we may process a disk offset more than once (two extents in
2190          * A) that land on the same extent (one extent in B), and then
2191          * insert two identical backrefs (both referring to the extent in B).
2192          */
2193         return 0;
2194 }
2195
2196 static void backref_insert(struct rb_root *root,
2197                            struct sa_defrag_extent_backref *backref)
2198 {
2199         struct rb_node **p = &root->rb_node;
2200         struct rb_node *parent = NULL;
2201         struct sa_defrag_extent_backref *entry;
2202         int ret;
2203
2204         while (*p) {
2205                 parent = *p;
2206                 entry = rb_entry(parent, struct sa_defrag_extent_backref, node);
2207
2208                 ret = backref_comp(backref, entry);
2209                 if (ret < 0)
2210                         p = &(*p)->rb_left;
2211                 else
2212                         p = &(*p)->rb_right;
2213         }
2214
2215         rb_link_node(&backref->node, parent, p);
2216         rb_insert_color(&backref->node, root);
2217 }
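
/*
 * Note on the insertion above: backref_comp() can legitimately return 0
 * for two distinct backrefs (see the diagram in its comment).  Equal keys
 * fall through to the right child, so duplicates are kept in the tree
 * rather than rejected.
 */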
2218
2219 /*
2220  * Note the backref might have changed; in that case we just return 0.
2221  */
2222 static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
2223                                        void *ctx)
2224 {
2225         struct btrfs_file_extent_item *extent;
2226         struct btrfs_fs_info *fs_info;
2227         struct old_sa_defrag_extent *old = ctx;
2228         struct new_sa_defrag_extent *new = old->new;
2229         struct btrfs_path *path = new->path;
2230         struct btrfs_key key;
2231         struct btrfs_root *root;
2232         struct sa_defrag_extent_backref *backref;
2233         struct extent_buffer *leaf;
2234         struct inode *inode = new->inode;
2235         int slot;
2236         int ret;
2237         u64 extent_offset;
2238         u64 num_bytes;
2239
2240         if (BTRFS_I(inode)->root->root_key.objectid == root_id &&
2241             inum == btrfs_ino(inode))
2242                 return 0;
2243
2244         key.objectid = root_id;
2245         key.type = BTRFS_ROOT_ITEM_KEY;
2246         key.offset = (u64)-1;
2247
2248         fs_info = BTRFS_I(inode)->root->fs_info;
2249         root = btrfs_read_fs_root_no_name(fs_info, &key);
2250         if (IS_ERR(root)) {
2251                 if (PTR_ERR(root) == -ENOENT)
2252                         return 0;
2253                 WARN_ON(1);
2254                 pr_debug("inum=%llu, offset=%llu, root_id=%llu\n",
2255                          inum, offset, root_id);
2256                 return PTR_ERR(root);
2257         }
2258
2259         key.objectid = inum;
2260         key.type = BTRFS_EXTENT_DATA_KEY;
2261         if (offset > (u64)-1 << 32)
2262                 key.offset = 0;
2263         else
2264                 key.offset = offset;
2265
2266         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2267         if (WARN_ON(ret < 0))
2268                 return ret;
2269         ret = 0;
2270
2271         while (1) {
2272                 cond_resched();
2273
2274                 leaf = path->nodes[0];
2275                 slot = path->slots[0];
2276
2277                 if (slot >= btrfs_header_nritems(leaf)) {
2278                         ret = btrfs_next_leaf(root, path);
2279                         if (ret < 0) {
2280                                 goto out;
2281                         } else if (ret > 0) {
2282                                 ret = 0;
2283                                 goto out;
2284                         }
2285                         continue;
2286                 }
2287
2288                 path->slots[0]++;
2289
2290                 btrfs_item_key_to_cpu(leaf, &key, slot);
2291
2292                 if (key.objectid > inum)
2293                         goto out;
2294
2295                 if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY)
2296                         continue;
2297
2298                 extent = btrfs_item_ptr(leaf, slot,
2299                                         struct btrfs_file_extent_item);
2300
2301                 if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
2302                         continue;
2303
2304                 /*
2305                  * 'offset' refers to the exact key.offset,
2306                  * NOT the 'offset' field in btrfs_extent_data_ref, i.e.
2307                  * (key.offset - extent_offset).
2308                  */
2309                 if (key.offset != offset)
2310                         continue;
2311
2312                 extent_offset = btrfs_file_extent_offset(leaf, extent);
2313                 num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
2314
2315                 if (extent_offset >= old->extent_offset + old->offset +
2316                     old->len || extent_offset + num_bytes <=
2317                     old->extent_offset + old->offset)
2318                         continue;
2319                 break;
2320         }
2321
2322         backref = kmalloc(sizeof(*backref), GFP_NOFS);
2323         if (!backref) {
2324                 ret = -ENOMEM;
2325                 goto out;
2326         }
2327
2328         backref->root_id = root_id;
2329         backref->inum = inum;
2330         backref->file_pos = offset;
2331         backref->num_bytes = num_bytes;
2332         backref->extent_offset = extent_offset;
2333         backref->generation = btrfs_file_extent_generation(leaf, extent);
2334         backref->old = old;
2335         backref_insert(&new->root, backref);
2336         old->count++;
2337 out:
2338         btrfs_release_path(path);
2339         WARN_ON(ret);
2340         return ret;
2341 }
2342
2343 static noinline bool record_extent_backrefs(struct btrfs_path *path,
2344                                    struct new_sa_defrag_extent *new)
2345 {
2346         struct btrfs_fs_info *fs_info = BTRFS_I(new->inode)->root->fs_info;
2347         struct old_sa_defrag_extent *old, *tmp;
2348         int ret;
2349
2350         new->path = path;
2351
2352         list_for_each_entry_safe(old, tmp, &new->head, list) {
2353                 ret = iterate_inodes_from_logical(old->bytenr +
2354                                                   old->extent_offset, fs_info,
2355                                                   path, record_one_backref,
2356                                                   old);
2357                 if (ret < 0 && ret != -ENOENT)
2358                         return false;
2359
2360                 /* no backref to be processed for this extent */
2361                 if (!old->count) {
2362                         list_del(&old->list);
2363                         kfree(old);
2364                 }
2365         }
2366
2367         if (list_empty(&new->head))
2368                 return false;
2369
2370         return true;
2371 }
2372
2373 static int relink_is_mergable(struct extent_buffer *leaf,
2374                               struct btrfs_file_extent_item *fi,
2375                               struct new_sa_defrag_extent *new)
2376 {
2377         if (btrfs_file_extent_disk_bytenr(leaf, fi) != new->bytenr)
2378                 return 0;
2379
2380         if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2381                 return 0;
2382
2383         if (btrfs_file_extent_compression(leaf, fi) != new->compress_type)
2384                 return 0;
2385
2386         if (btrfs_file_extent_encryption(leaf, fi) ||
2387             btrfs_file_extent_other_encoding(leaf, fi))
2388                 return 0;
2389
2390         return 1;
2391 }
2392
2393 /*
2394  * Note the backref might have changed; in that case we just return 0.
2395  */
2396 static noinline int relink_extent_backref(struct btrfs_path *path,
2397                                  struct sa_defrag_extent_backref *prev,
2398                                  struct sa_defrag_extent_backref *backref)
2399 {
2400         struct btrfs_file_extent_item *extent;
2401         struct btrfs_file_extent_item *item;
2402         struct btrfs_ordered_extent *ordered;
2403         struct btrfs_trans_handle *trans;
2404         struct btrfs_fs_info *fs_info;
2405         struct btrfs_root *root;
2406         struct btrfs_key key;
2407         struct extent_buffer *leaf;
2408         struct old_sa_defrag_extent *old = backref->old;
2409         struct new_sa_defrag_extent *new = old->new;
2410         struct inode *src_inode = new->inode;
2411         struct inode *inode;
2412         struct extent_state *cached = NULL;
2413         int ret = 0;
2414         u64 start;
2415         u64 len;
2416         u64 lock_start;
2417         u64 lock_end;
2418         bool merge = false;
2419         int index;
2420
2421         if (prev && prev->root_id == backref->root_id &&
2422             prev->inum == backref->inum &&
2423             prev->file_pos + prev->num_bytes == backref->file_pos)
2424                 merge = true;
2425
2426         /* step 1: get root */
2427         key.objectid = backref->root_id;
2428         key.type = BTRFS_ROOT_ITEM_KEY;
2429         key.offset = (u64)-1;
2430
2431         fs_info = BTRFS_I(src_inode)->root->fs_info;
2432         index = srcu_read_lock(&fs_info->subvol_srcu);
2433
2434         root = btrfs_read_fs_root_no_name(fs_info, &key);
2435         if (IS_ERR(root)) {
2436                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2437                 if (PTR_ERR(root) == -ENOENT)
2438                         return 0;
2439                 return PTR_ERR(root);
2440         }
2441
2442         if (btrfs_root_readonly(root)) {
2443                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2444                 return 0;
2445         }
2446
2447         /* step 2: get inode */
2448         key.objectid = backref->inum;
2449         key.type = BTRFS_INODE_ITEM_KEY;
2450         key.offset = 0;
2451
2452         inode = btrfs_iget(fs_info->sb, &key, root, NULL);
2453         if (IS_ERR(inode)) {
2454                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2455                 return 0;
2456         }
2457
2458         srcu_read_unlock(&fs_info->subvol_srcu, index);
2459
2460         /* step 3: relink backref */
2461         lock_start = backref->file_pos;
2462         lock_end = backref->file_pos + backref->num_bytes - 1;
2463         lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2464                          0, &cached);
2465
2466         ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
2467         if (ordered) {
2468                 btrfs_put_ordered_extent(ordered);
2469                 goto out_unlock;
2470         }
2471
2472         trans = btrfs_join_transaction(root);
2473         if (IS_ERR(trans)) {
2474                 ret = PTR_ERR(trans);
2475                 goto out_unlock;
2476         }
2477
2478         key.objectid = backref->inum;
2479         key.type = BTRFS_EXTENT_DATA_KEY;
2480         key.offset = backref->file_pos;
2481
2482         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2483         if (ret < 0) {
2484                 goto out_free_path;
2485         } else if (ret > 0) {
2486                 ret = 0;
2487                 goto out_free_path;
2488         }
2489
2490         extent = btrfs_item_ptr(path->nodes[0], path->slots[0],
2491                                 struct btrfs_file_extent_item);
2492
2493         if (btrfs_file_extent_generation(path->nodes[0], extent) !=
2494             backref->generation)
2495                 goto out_free_path;
2496
2497         btrfs_release_path(path);
2498
2499         start = backref->file_pos;
2500         if (backref->extent_offset < old->extent_offset + old->offset)
2501                 start += old->extent_offset + old->offset -
2502                          backref->extent_offset;
2503
2504         len = min(backref->extent_offset + backref->num_bytes,
2505                   old->extent_offset + old->offset + old->len);
2506         len -= max(backref->extent_offset, old->extent_offset + old->offset);
2507
2508         ret = btrfs_drop_extents(trans, root, inode, start,
2509                                  start + len, 1);
2510         if (ret)
2511                 goto out_free_path;
2512 again:
2513         key.objectid = btrfs_ino(inode);
2514         key.type = BTRFS_EXTENT_DATA_KEY;
2515         key.offset = start;
2516
2517         path->leave_spinning = 1;
2518         if (merge) {
2519                 struct btrfs_file_extent_item *fi;
2520                 u64 extent_len;
2521                 struct btrfs_key found_key;
2522
2523                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2524                 if (ret < 0)
2525                         goto out_free_path;
2526
2527                 path->slots[0]--;
2528                 leaf = path->nodes[0];
2529                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2530
2531                 fi = btrfs_item_ptr(leaf, path->slots[0],
2532                                     struct btrfs_file_extent_item);
2533                 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
2534
2535                 if (extent_len + found_key.offset == start &&
2536                     relink_is_mergable(leaf, fi, new)) {
2537                         btrfs_set_file_extent_num_bytes(leaf, fi,
2538                                                         extent_len + len);
2539                         btrfs_mark_buffer_dirty(leaf);
2540                         inode_add_bytes(inode, len);
2541
2542                         ret = 1;
2543                         goto out_free_path;
2544                 } else {
2545                         merge = false;
2546                         btrfs_release_path(path);
2547                         goto again;
2548                 }
2549         }
2550
2551         ret = btrfs_insert_empty_item(trans, root, path, &key,
2552                                         sizeof(*extent));
2553         if (ret) {
2554                 btrfs_abort_transaction(trans, root, ret);
2555                 goto out_free_path;
2556         }
2557
2558         leaf = path->nodes[0];
2559         item = btrfs_item_ptr(leaf, path->slots[0],
2560                                 struct btrfs_file_extent_item);
2561         btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr);
2562         btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len);
2563         btrfs_set_file_extent_offset(leaf, item, start - new->file_pos);
2564         btrfs_set_file_extent_num_bytes(leaf, item, len);
2565         btrfs_set_file_extent_ram_bytes(leaf, item, new->len);
2566         btrfs_set_file_extent_generation(leaf, item, trans->transid);
2567         btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
2568         btrfs_set_file_extent_compression(leaf, item, new->compress_type);
2569         btrfs_set_file_extent_encryption(leaf, item, 0);
2570         btrfs_set_file_extent_other_encoding(leaf, item, 0);
2571
2572         btrfs_mark_buffer_dirty(leaf);
2573         inode_add_bytes(inode, len);
2574         btrfs_release_path(path);
2575
2576         ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
2577                         new->disk_len, 0,
2578                         backref->root_id, backref->inum,
2579                         new->file_pos, 0);      /* start - extent_offset */
2580         if (ret) {
2581                 btrfs_abort_transaction(trans, root, ret);
2582                 goto out_free_path;
2583         }
2584
2585         ret = 1;
2586 out_free_path:
2587         btrfs_release_path(path);
2588         path->leave_spinning = 0;
2589         btrfs_end_transaction(trans, root);
2590 out_unlock:
2591         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2592                              &cached, GFP_NOFS);
2593         iput(inode);
2594         return ret;
2595 }
2596
2597 static void free_sa_defrag_extent(struct new_sa_defrag_extent *new)
2598 {
2599         struct old_sa_defrag_extent *old, *tmp;
2600
2601         if (!new)
2602                 return;
2603
2604         list_for_each_entry_safe(old, tmp, &new->head, list) {
2605                 kfree(old);
2606         }
2607         kfree(new);
2608 }
2609
2610 static void relink_file_extents(struct new_sa_defrag_extent *new)
2611 {
2612         struct btrfs_path *path;
2613         struct sa_defrag_extent_backref *backref;
2614         struct sa_defrag_extent_backref *prev = NULL;
2615         struct inode *inode;
2616         struct btrfs_root *root;
2617         struct rb_node *node;
2618         int ret;
2619
2620         inode = new->inode;
2621         root = BTRFS_I(inode)->root;
2622
2623         path = btrfs_alloc_path();
2624         if (!path)
2625                 return;
2626
2627         if (!record_extent_backrefs(path, new)) {
2628                 btrfs_free_path(path);
2629                 goto out;
2630         }
2631         btrfs_release_path(path);
2632
2633         while (1) {
2634                 node = rb_first(&new->root);
2635                 if (!node)
2636                         break;
2637                 rb_erase(node, &new->root);
2638
2639                 backref = rb_entry(node, struct sa_defrag_extent_backref, node);
2640
2641                 ret = relink_extent_backref(path, prev, backref);
2642                 WARN_ON(ret < 0);
2643
2644                 kfree(prev);
2645
2646                 if (ret == 1)
2647                         prev = backref;
2648                 else
2649                         prev = NULL;
2650                 cond_resched();
2651         }
2652         kfree(prev);
2653
2654         btrfs_free_path(path);
2655 out:
2656         free_sa_defrag_extent(new);
2657
2658         atomic_dec(&root->fs_info->defrag_running);
2659         wake_up(&root->fs_info->transaction_wait);
2660 }
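
/*
 * A minimal sketch of the snapshot-aware defrag flow these helpers
 * implement (editor's summary of the surrounding code, not a new API):
 *
 *   btrfs_finish_ordered_io()
 *     -> record_old_file_extents()     collect the pre-defrag extents
 *   ... after the ordered extent completes ...
 *     -> relink_file_extents()
 *          -> record_extent_backrefs() resolve who referenced the old
 *                                      extents
 *          -> relink_extent_backref()  point those references at the new
 *                                      extent, one backref at a time
 */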
2661
2662 static struct new_sa_defrag_extent *
2663 record_old_file_extents(struct inode *inode,
2664                         struct btrfs_ordered_extent *ordered)
2665 {
2666         struct btrfs_root *root = BTRFS_I(inode)->root;
2667         struct btrfs_path *path;
2668         struct btrfs_key key;
2669         struct old_sa_defrag_extent *old;
2670         struct new_sa_defrag_extent *new;
2671         int ret;
2672
2673         new = kmalloc(sizeof(*new), GFP_NOFS);
2674         if (!new)
2675                 return NULL;
2676
2677         new->inode = inode;
2678         new->file_pos = ordered->file_offset;
2679         new->len = ordered->len;
2680         new->bytenr = ordered->start;
2681         new->disk_len = ordered->disk_len;
2682         new->compress_type = ordered->compress_type;
2683         new->root = RB_ROOT;
2684         INIT_LIST_HEAD(&new->head);
2685
2686         path = btrfs_alloc_path();
2687         if (!path)
2688                 goto out_kfree;
2689
2690         key.objectid = btrfs_ino(inode);
2691         key.type = BTRFS_EXTENT_DATA_KEY;
2692         key.offset = new->file_pos;
2693
2694         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2695         if (ret < 0)
2696                 goto out_free_path;
2697         if (ret > 0 && path->slots[0] > 0)
2698                 path->slots[0]--;
2699
2700         /* find out all the old extents for the file range */
2701         while (1) {
2702                 struct btrfs_file_extent_item *extent;
2703                 struct extent_buffer *l;
2704                 int slot;
2705                 u64 num_bytes;
2706                 u64 offset;
2707                 u64 end;
2708                 u64 disk_bytenr;
2709                 u64 extent_offset;
2710
2711                 l = path->nodes[0];
2712                 slot = path->slots[0];
2713
2714                 if (slot >= btrfs_header_nritems(l)) {
2715                         ret = btrfs_next_leaf(root, path);
2716                         if (ret < 0)
2717                                 goto out_free_path;
2718                         else if (ret > 0)
2719                                 break;
2720                         continue;
2721                 }
2722
2723                 btrfs_item_key_to_cpu(l, &key, slot);
2724
2725                 if (key.objectid != btrfs_ino(inode))
2726                         break;
2727                 if (key.type != BTRFS_EXTENT_DATA_KEY)
2728                         break;
2729                 if (key.offset >= new->file_pos + new->len)
2730                         break;
2731
2732                 extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item);
2733
2734                 num_bytes = btrfs_file_extent_num_bytes(l, extent);
2735                 if (key.offset + num_bytes < new->file_pos)
2736                         goto next;
2737
2738                 disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent);
2739                 if (!disk_bytenr)
2740                         goto next;
2741
2742                 extent_offset = btrfs_file_extent_offset(l, extent);
2743
2744                 old = kmalloc(sizeof(*old), GFP_NOFS);
2745                 if (!old)
2746                         goto out_free_path;
2747
2748                 offset = max(new->file_pos, key.offset);
2749                 end = min(new->file_pos + new->len, key.offset + num_bytes);
2750
2751                 old->bytenr = disk_bytenr;
2752                 old->extent_offset = extent_offset;
2753                 old->offset = offset - key.offset;
2754                 old->len = end - offset;
2755                 old->new = new;
2756                 old->count = 0;
2757                 list_add_tail(&old->list, &new->head);
2758 next:
2759                 path->slots[0]++;
2760                 cond_resched();
2761         }
2762
2763         btrfs_free_path(path);
2764         atomic_inc(&root->fs_info->defrag_running);
2765
2766         return new;
2767
2768 out_free_path:
2769         btrfs_free_path(path);
2770 out_kfree:
2771         free_sa_defrag_extent(new);
2772         return NULL;
2773 }
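
/*
 * Worked example for the clamping above (illustrative numbers only): with
 * new->file_pos = 8192 and new->len = 8192, an old extent item at
 * key.offset = 4096 covering num_bytes = 8192 yields
 *
 *   offset      = max(8192, 4096)  = 8192
 *   end         = min(16384, 12288) = 12288
 *   old->offset = 8192 - 4096      = 4096  (offset into the old extent)
 *   old->len    = 12288 - 8192     = 4096  (overlap with the new range)
 */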
2774
2775 static void btrfs_release_delalloc_bytes(struct btrfs_root *root,
2776                                          u64 start, u64 len)
2777 {
2778         struct btrfs_block_group_cache *cache;
2779
2780         cache = btrfs_lookup_block_group(root->fs_info, start);
2781         ASSERT(cache);
2782
2783         spin_lock(&cache->lock);
2784         cache->delalloc_bytes -= len;
2785         spin_unlock(&cache->lock);
2786
2787         btrfs_put_block_group(cache);
2788 }
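
/*
 * Editor's note: this undoes the per-block-group delalloc accounting that
 * was taken when the extent was reserved; the counter lets other code
 * (e.g. free space cache writeout) see whether a block group still has
 * outstanding delalloc.  The exact consumers vary by kernel version.
 */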
2789
2790 /* as ordered data IO finishes, this gets called so we can finish
2791  * an ordered extent if the range of bytes in the file it covers is
2792  * fully written.
2793  */
2794 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
2795 {
2796         struct inode *inode = ordered_extent->inode;
2797         struct btrfs_root *root = BTRFS_I(inode)->root;
2798         struct btrfs_trans_handle *trans = NULL;
2799         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2800         struct extent_state *cached_state = NULL;
2801         struct new_sa_defrag_extent *new = NULL;
2802         int compress_type = 0;
2803         int ret = 0;
2804         u64 logical_len = ordered_extent->len;
2805         bool nolock;
2806         bool truncated = false;
2807
2808         nolock = btrfs_is_free_space_inode(inode);
2809
2810         if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
2811                 ret = -EIO;
2812                 goto out;
2813         }
2814
2815         btrfs_free_io_failure_record(inode, ordered_extent->file_offset,
2816                                      ordered_extent->file_offset +
2817                                      ordered_extent->len - 1);
2818
2819         if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
2820                 truncated = true;
2821                 logical_len = ordered_extent->truncated_len;
2822                 /* Truncated the entire extent, don't bother adding */
2823                 if (!logical_len)
2824                         goto out;
2825         }
2826
2827         if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
2828                 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
2829                 btrfs_ordered_update_i_size(inode, 0, ordered_extent);
2830                 if (nolock)
2831                         trans = btrfs_join_transaction_nolock(root);
2832                 else
2833                         trans = btrfs_join_transaction(root);
2834                 if (IS_ERR(trans)) {
2835                         ret = PTR_ERR(trans);
2836                         trans = NULL;
2837                         goto out;
2838                 }
2839                 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
2840                 ret = btrfs_update_inode_fallback(trans, root, inode);
2841                 if (ret) /* -ENOMEM or corruption */
2842                         btrfs_abort_transaction(trans, root, ret);
2843                 goto out;
2844         }
2845
2846         lock_extent_bits(io_tree, ordered_extent->file_offset,
2847                          ordered_extent->file_offset + ordered_extent->len - 1,
2848                          0, &cached_state);
2849
2850         ret = test_range_bit(io_tree, ordered_extent->file_offset,
2851                         ordered_extent->file_offset + ordered_extent->len - 1,
2852                         EXTENT_DEFRAG, 1, cached_state);
2853         if (ret) {
2854                 u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
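                /*
                 * Note: the "0 &&" below deliberately compiles out the
                 * snapshot-aware defrag relink path; it was disabled
                 * upstream due to bugs, but the code is kept for reference.
                 */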
2855                 if (0 && last_snapshot >= BTRFS_I(inode)->generation)
2856                         /* the inode is shared */
2857                         new = record_old_file_extents(inode, ordered_extent);
2858
2859                 clear_extent_bit(io_tree, ordered_extent->file_offset,
2860                         ordered_extent->file_offset + ordered_extent->len - 1,
2861                         EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS);
2862         }
2863
2864         if (nolock)
2865                 trans = btrfs_join_transaction_nolock(root);
2866         else
2867                 trans = btrfs_join_transaction(root);
2868         if (IS_ERR(trans)) {
2869                 ret = PTR_ERR(trans);
2870                 trans = NULL;
2871                 goto out_unlock;
2872         }
2873
2874         trans->block_rsv = &root->fs_info->delalloc_block_rsv;
2875
2876         if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
2877                 compress_type = ordered_extent->compress_type;
2878         if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
2879                 BUG_ON(compress_type);
2880                 ret = btrfs_mark_extent_written(trans, inode,
2881                                                 ordered_extent->file_offset,
2882                                                 ordered_extent->file_offset +
2883                                                 logical_len);
2884         } else {
2885                 BUG_ON(root == root->fs_info->tree_root);
2886                 ret = insert_reserved_file_extent(trans, inode,
2887                                                 ordered_extent->file_offset,
2888                                                 ordered_extent->start,
2889                                                 ordered_extent->disk_len,
2890                                                 logical_len, logical_len,
2891                                                 compress_type, 0, 0,
2892                                                 BTRFS_FILE_EXTENT_REG);
2893                 if (!ret)
2894                         btrfs_release_delalloc_bytes(root,
2895                                                      ordered_extent->start,
2896                                                      ordered_extent->disk_len);
2897         }
2898         unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
2899                            ordered_extent->file_offset, ordered_extent->len,
2900                            trans->transid);
2901         if (ret < 0) {
2902                 btrfs_abort_transaction(trans, root, ret);
2903                 goto out_unlock;
2904         }
2905
2906         add_pending_csums(trans, inode, ordered_extent->file_offset,
2907                           &ordered_extent->list);
2908
2909         btrfs_ordered_update_i_size(inode, 0, ordered_extent);
2910         ret = btrfs_update_inode_fallback(trans, root, inode);
2911         if (ret) { /* -ENOMEM or corruption */
2912                 btrfs_abort_transaction(trans, root, ret);
2913                 goto out_unlock;
2914         }
2915         ret = 0;
2916 out_unlock:
2917         unlock_extent_cached(io_tree, ordered_extent->file_offset,
2918                              ordered_extent->file_offset +
2919                              ordered_extent->len - 1, &cached_state, GFP_NOFS);
2920 out:
2921         if (root != root->fs_info->tree_root)
2922                 btrfs_delalloc_release_metadata(inode, ordered_extent->len);
2923         if (trans)
2924                 btrfs_end_transaction(trans, root);
2925
2926         if (ret || truncated) {
2927                 u64 start, end;
2928
2929                 if (truncated)
2930                         start = ordered_extent->file_offset + logical_len;
2931                 else
2932                         start = ordered_extent->file_offset;
2933                 end = ordered_extent->file_offset + ordered_extent->len - 1;
2934                 clear_extent_uptodate(io_tree, start, end, NULL, GFP_NOFS);
2935
2936                 /* Drop the cache for the part of the extent we didn't write. */
2937                 btrfs_drop_extent_cache(inode, start, end, 0);
2938
2939                 /*
2940                  * If the ordered extent had an IOERR or something else went
2941                  * wrong we need to return the space for this ordered extent
2942                  * back to the allocator.  We only free the extent in the
2943                  * truncated case if we didn't write out the extent at all.
2944                  */
2945                 if ((ret || !logical_len) &&
2946                     !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
2947                     !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
2948                         btrfs_free_reserved_extent(root, ordered_extent->start,
2949                                                    ordered_extent->disk_len, 1);
2950         }
2951
2953         /*
2954          * This needs to be done to make sure anybody waiting knows we are done
2955          * updating everything for this ordered extent.
2956          */
2957         btrfs_remove_ordered_extent(inode, ordered_extent);
2958
2959         /* for snapshot-aware defrag */
2960         if (new) {
2961                 if (ret) {
2962                         free_sa_defrag_extent(new);
2963                         atomic_dec(&root->fs_info->defrag_running);
2964                 } else {
2965                         relink_file_extents(new);
2966                 }
2967         }
2968
2969         /* once for us */
2970         btrfs_put_ordered_extent(ordered_extent);
2971         /* once for the tree */
2972         btrfs_put_ordered_extent(ordered_extent);
2973
2974         return ret;
2975 }
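
/*
 * Decision table for the completion above (editor's summary of the code):
 *
 *   ORDERED_NOCOW    -> just update i_size and the inode item; no file
 *                       extent item changes are needed
 *   ORDERED_PREALLOC -> btrfs_mark_extent_written(): flip the existing
 *                       prealloc extent to a regular, written one
 *   otherwise (COW)  -> insert_reserved_file_extent(): insert a new file
 *                       extent item for the freshly written extent
 *
 * On error, or for the tail of a truncated ordered extent that was never
 * written, the reserved extent is returned to the allocator.
 */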
2976
2977 static void finish_ordered_fn(struct btrfs_work *work)
2978 {
2979         struct btrfs_ordered_extent *ordered_extent;
2980         ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
2981         btrfs_finish_ordered_io(ordered_extent);
2982 }
2983
2984 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
2985                                 struct extent_state *state, int uptodate)
2986 {
2987         struct inode *inode = page->mapping->host;
2988         struct btrfs_root *root = BTRFS_I(inode)->root;
2989         struct btrfs_ordered_extent *ordered_extent = NULL;
2990         struct btrfs_workqueue *wq;
2991         btrfs_work_func_t func;
2992
2993         trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
2994
2995         ClearPagePrivate2(page);
2996         if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
2997                                             end - start + 1, uptodate))
2998                 return 0;
2999
3000         if (btrfs_is_free_space_inode(inode)) {
3001                 wq = root->fs_info->endio_freespace_worker;
3002                 func = btrfs_freespace_write_helper;
3003         } else {
3004                 wq = root->fs_info->endio_write_workers;
3005                 func = btrfs_endio_write_helper;
3006         }
3007
3008         btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL,
3009                         NULL);
3010         btrfs_queue_work(wq, &ordered_extent->work);
3011
3012         return 0;
3013 }
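
/*
 * Call-chain sketch (editor's note): the write-endio path defers the heavy
 * lifting to a workqueue, since btrfs_finish_ordered_io() joins a
 * transaction and may block:
 *
 *   bio end_io
 *     -> btrfs_writepage_end_io_hook()
 *          -> btrfs_dec_test_ordered_pending()  last writer for the range?
 *          -> btrfs_queue_work(wq, ...)         queue finish_ordered_fn
 *               -> finish_ordered_fn()
 *                    -> btrfs_finish_ordered_io()
 */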
3014
3015 static int __readpage_endio_check(struct inode *inode,
3016                                   struct btrfs_io_bio *io_bio,
3017                                   int icsum, struct page *page,
3018                                   int pgoff, u64 start, size_t len)
3019 {
3020         char *kaddr;
3021         u32 csum_expected;
3022         u32 csum = ~(u32)0;
3023
3024         csum_expected = *(((u32 *)io_bio->csum) + icsum);
3025
3026         kaddr = kmap_atomic(page);
3027         csum = btrfs_csum_data(kaddr + pgoff, csum, len);
3028         btrfs_csum_final(csum, (char *)&csum);
3029         if (csum != csum_expected)
3030                 goto zeroit;
3031
3032         kunmap_atomic(kaddr);
3033         return 0;
3034 zeroit:
3035         btrfs_warn_rl(BTRFS_I(inode)->root->fs_info,
3036                 "csum failed ino %llu off %llu csum %u expected csum %u",
3037                            btrfs_ino(inode), start, csum, csum_expected);
3038         memset(kaddr + pgoff, 1, len);
3039         flush_dcache_page(page);
3040         kunmap_atomic(kaddr);
3041         if (csum_expected == 0)
3042                 return 0;
3043         return -EIO;
3044 }
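
/*
 * The checksum check above follows the usual crc32c pattern (editor's
 * sketch; btrfs_csum_data() wraps crc32c and btrfs_csum_final() finalizes
 * the result into its on-disk form):
 *
 *   u32 csum = ~(u32)0;                        seed
 *   csum = btrfs_csum_data(data, csum, len);   crc32c over the block
 *   btrfs_csum_final(csum, (char *)&csum);     finalize for comparison
 *
 * On mismatch the block is deliberately overwritten with non-zero bytes,
 * so stale data cannot be mistaken for good data before -EIO is returned.
 */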
3045
3046 /*
3047  * when reads are done, we need to check csums to verify the data is correct.
3048  * If there's a match, we allow the bio to finish.  If not, the code in
3049  * extent_io.c will try to find good copies for us.
3050  */
3051 static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
3052                                       u64 phy_offset, struct page *page,
3053                                       u64 start, u64 end, int mirror)
3054 {
3055         size_t offset = start - page_offset(page);
3056         struct inode *inode = page->mapping->host;
3057         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3058         struct btrfs_root *root = BTRFS_I(inode)->root;
3059
3060         if (PageChecked(page)) {
3061                 ClearPageChecked(page);
3062                 return 0;
3063         }
3064
3065         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
3066                 return 0;
3067
3068         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
3069             test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
3070                 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
3071                                   GFP_NOFS);
3072                 return 0;
3073         }
3074
3075         phy_offset >>= inode->i_sb->s_blocksize_bits;
3076         return __readpage_endio_check(inode, io_bio, phy_offset, page, offset,
3077                                       start, (size_t)(end - start + 1));
3078 }
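
/*
 * Editor's note on the phy_offset arithmetic above: phy_offset is a byte
 * offset into the checksummed data, so shifting it right by
 * s_blocksize_bits turns it into an index into io_bio->csum.  E.g. with
 * 4K blocks, phy_offset 8192 selects the third u32 checksum (index 2).
 */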
3079
3080 struct delayed_iput {
3081         struct list_head list;
3082         struct inode *inode;
3083 };
3084
3085 /* JDM: If this is fs-wide, why can't we add a pointer to
3086  * btrfs_inode instead and avoid the allocation? */
3087 void btrfs_add_delayed_iput(struct inode *inode)
3088 {
3089         struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
3090         struct delayed_iput *delayed;
3091
3092         if (atomic_add_unless(&inode->i_count, -1, 1))
3093                 return;
3094
3095         delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
3096         delayed->inode = inode;
3097
3098         spin_lock(&fs_info->delayed_iput_lock);
3099         list_add_tail(&delayed->list, &fs_info->delayed_iputs);
3100         spin_unlock(&fs_info->delayed_iput_lock);
3101 }
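
/*
 * Usage note (editor's sketch): atomic_add_unless(&i_count, -1, 1) drops
 * the reference immediately when it is not the last one; only a final
 * reference is parked on fs_info->delayed_iputs, so the eventual eviction
 * happens from btrfs_run_delayed_iputs() in a safe context rather than
 * from wherever the caller happens to be.
 */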
3102
3103 void btrfs_run_delayed_iputs(struct btrfs_root *root)
3104 {
3105         LIST_HEAD(list);
3106         struct btrfs_fs_info *fs_info = root->fs_info;
3107         struct delayed_iput *delayed;
3108         int empty;
3109
3110         spin_lock(&fs_info->delayed_iput_lock);
3111         empty = list_empty(&fs_info->delayed_iputs);
3112         spin_unlock(&fs_info->delayed_iput_lock);
3113         if (empty)
3114                 return;
3115
3116         down_read(&fs_info->delayed_iput_sem);
3117
3118         spin_lock(&fs_info->delayed_iput_lock);
3119         list_splice_init(&fs_info->delayed_iputs, &list);
3120         spin_unlock(&fs_info->delayed_iput_lock);
3121
3122         while (!list_empty(&list)) {
3123                 delayed = list_entry(list.next, struct delayed_iput, list);
3124                 list_del(&delayed->list);
3125                 iput(delayed->inode);
3126                 kfree(delayed);
3127         }
3128
3129         up_read(&fs_info->delayed_iput_sem);
3130 }
3131
3132 /*
3133  * This is called at transaction commit time. If there are no orphan
3134  * files in the subvolume, it removes the orphan item and frees the
3135  * block_rsv structure.
3136  */
3137 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
3138                               struct btrfs_root *root)
3139 {
3140         struct btrfs_block_rsv *block_rsv;
3141         int ret;
3142
3143         if (atomic_read(&root->orphan_inodes) ||
3144             root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
3145                 return;
3146
3147         spin_lock(&root->orphan_lock);
3148         if (atomic_read(&root->orphan_inodes)) {
3149                 spin_unlock(&root->orphan_lock);
3150                 return;
3151         }
3152
3153         if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) {
3154                 spin_unlock(&root->orphan_lock);
3155                 return;
3156         }
3157
3158         block_rsv = root->orphan_block_rsv;
3159         root->orphan_block_rsv = NULL;
3160         spin_unlock(&root->orphan_lock);
3161
3162         if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state) &&
3163             btrfs_root_refs(&root->root_item) > 0) {
3164                 ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
3165                                             root->root_key.objectid);
3166                 if (ret)
3167                         btrfs_abort_transaction(trans, root, ret);
3168                 else
3169                         clear_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
3170                                   &root->state);
3171         }
3172
3173         if (block_rsv) {
3174                 WARN_ON(block_rsv->size > 0);
3175                 btrfs_free_block_rsv(root, block_rsv);
3176         }
3177 }
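
/*
 * Editor's note: the unlocked checks at the top of the function above are
 * an optimization only; both conditions are re-checked under orphan_lock
 * (classic double-checked locking) before the block_rsv is detached.
 */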
3178
3179 /*
3180  * This creates an orphan entry for the given inode in case something goes
3181  * wrong in the middle of an unlink/truncate.
3182  *
3183  * NOTE: the caller of this function should reserve 5 units of metadata
3184  *       for this call.
3185  */
3186 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
3187 {
3188         struct btrfs_root *root = BTRFS_I(inode)->root;
3189         struct btrfs_block_rsv *block_rsv = NULL;
3190         int reserve = 0;
3191         int insert = 0;
3192         int ret;
3193
3194         if (!root->orphan_block_rsv) {
3195                 block_rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
3196                 if (!block_rsv)
3197                         return -ENOMEM;
3198         }
3199
3200         spin_lock(&root->orphan_lock);
3201         if (!root->orphan_block_rsv) {
3202                 root->orphan_block_rsv = block_rsv;
3203         } else if (block_rsv) {
3204                 btrfs_free_block_rsv(root, block_rsv);
3205                 block_rsv = NULL;
3206         }
3207
3208         if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3209                               &BTRFS_I(inode)->runtime_flags)) {
3210 #if 0
3211                 /*
3212                  * For proper ENOSPC handling, we should do orphan
3213                  * cleanup when mounting. But this introduces backward
3214                  * compatibility issues.
3215                  */
3216                 if (!xchg(&root->orphan_item_inserted, 1))
3217                         insert = 2;
3218                 else
3219                         insert = 1;
3220 #endif
3221                 insert = 1;
3222                 atomic_inc(&root->orphan_inodes);
3223         }
3224
3225         if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3226                               &BTRFS_I(inode)->runtime_flags))
3227                 reserve = 1;
3228         spin_unlock(&root->orphan_lock);
3229
3230         /* grab metadata reservation from transaction handle */
3231         if (reserve) {
3232                 ret = btrfs_orphan_reserve_metadata(trans, inode);
3233                 BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
3234         }
3235
3236         /* insert an orphan item to track this unlinked/truncated file */
3237         if (insert >= 1) {
3238                 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
3239                 if (ret) {
3240                         atomic_dec(&root->orphan_inodes);
3241                         if (reserve) {
3242                                 clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3243                                           &BTRFS_I(inode)->runtime_flags);
3244                                 btrfs_orphan_release_metadata(inode);
3245                         }
3246                         if (ret != -EEXIST) {
3247                                 clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3248                                           &BTRFS_I(inode)->runtime_flags);
3249                                 btrfs_abort_transaction(trans, root, ret);
3250                                 return ret;
3251                         }
3252                 }
3253                 ret = 0;
3254         }
3255
3256         /* insert an orphan item to track that the subvolume contains orphan files */
3257         if (insert >= 2) {
3258                 ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
3259                                                root->root_key.objectid);
3260                 if (ret && ret != -EEXIST) {
3261                         btrfs_abort_transaction(trans, root, ret);
3262                         return ret;
3263                 }
3264         }
3265         return 0;
3266 }
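
/*
 * Typical caller pattern (editor's sketch, matching the NOTE above; error
 * handling elided):
 *
 *	trans = btrfs_start_transaction(root, 5);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	ret = btrfs_orphan_add(trans, inode);
 *	...
 *	btrfs_end_transaction(trans, root);
 */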
3267
3268 /*
3269  * We have done the truncate/delete so we can go ahead and remove the orphan
3270  * item for this particular inode.
3271  */
3272 static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3273                             struct inode *inode)
3274 {
3275         struct btrfs_root *root = BTRFS_I(inode)->root;
3276         int delete_item = 0;
3277         int release_rsv = 0;
3278         int ret = 0;
3279
3280         spin_lock(&root->orphan_lock);
3281         if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3282                                &BTRFS_I(inode)->runtime_flags))
3283                 delete_item = 1;
3284
3285         if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3286                                &BTRFS_I(inode)->runtime_flags))
3287                 release_rsv = 1;
3288         spin_unlock(&root->orphan_lock);
3289
3290         if (delete_item) {
3291                 atomic_dec(&root->orphan_inodes);
3292                 if (trans)
3293                         ret = btrfs_del_orphan_item(trans, root,
3294                                                     btrfs_ino(inode));
3295         }
3296
3297         if (release_rsv)
3298                 btrfs_orphan_release_metadata(inode);
3299
3300         return ret;
3301 }
3302
3303 /*
3304  * this cleans up any orphans that may be left on the list from the last use
3305  * of this root.
3306  */
3307 int btrfs_orphan_cleanup(struct btrfs_root *root)
3308 {
3309         struct btrfs_path *path;
3310         struct extent_buffer *leaf;
3311         struct btrfs_key key, found_key;
3312         struct btrfs_trans_handle *trans;
3313         struct inode *inode;
3314         u64 last_objectid = 0;
3315         int ret = 0, nr_unlink = 0, nr_truncate = 0;
3316
3317         if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
3318                 return 0;
3319
3320         path = btrfs_alloc_path();
3321         if (!path) {
3322                 ret = -ENOMEM;
3323                 goto out;
3324         }
3325         path->reada = -1;
3326
3327         key.objectid = BTRFS_ORPHAN_OBJECTID;
3328         key.type = BTRFS_ORPHAN_ITEM_KEY;
3329         key.offset = (u64)-1;
3330
3331         while (1) {
3332                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3333                 if (ret < 0)
3334                         goto out;
3335
3336                 /*
3337                  * ret == 0 means we found what we were searching for, which is
3338                  * weird, but possible, so only adjust the path if we didn't
3339                  * find the key and check whether we have items that match
3340                  */
3341                 if (ret > 0) {
3342                         ret = 0;
3343                         if (path->slots[0] == 0)
3344                                 break;
3345                         path->slots[0]--;
3346                 }
3347
3348                 /* pull out the item */
3349                 leaf = path->nodes[0];
3350                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3351
3352                 /* make sure the item matches what we want */
3353                 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3354                         break;
3355                 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
3356                         break;
3357
3358                 /* release the path since we're done with it */
3359                 btrfs_release_path(path);
3360
3361                 /*
3362                  * this is basically btrfs_lookup, without the crossing-root
3363                  * handling.  we store the inode number in the offset field
3364                  * of the orphan item.
3365                  */
3366
3367                 if (found_key.offset == last_objectid) {
3368                         btrfs_err(root->fs_info,
3369                                 "Error removing orphan entry, stopping orphan cleanup");
3370                         ret = -EINVAL;
3371                         goto out;
3372                 }
3373
3374                 last_objectid = found_key.offset;
3375
3376                 found_key.objectid = found_key.offset;
3377                 found_key.type = BTRFS_INODE_ITEM_KEY;
3378                 found_key.offset = 0;
3379                 inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
3380                 ret = PTR_ERR_OR_ZERO(inode);
3381                 if (ret && ret != -ESTALE)
3382                         goto out;
3383
3384                 if (ret == -ESTALE && root == root->fs_info->tree_root) {
3385                         struct btrfs_root *dead_root;
3386                         struct btrfs_fs_info *fs_info = root->fs_info;
3387                         int is_dead_root = 0;
3388
3389                         /*
3390                          * this is an orphan in the tree root. Currently these
3391                          * could come from 2 sources:
3392                          *  a) a snapshot deletion in progress
3393                          *  b) a free space cache inode
3394                          * We need to distinguish those two, as the snapshot
3395                          * orphan must not get deleted.
3396                          * find_dead_roots already ran before us, so if this
3397                          * is a snapshot deletion, we should find the root
3398                          * in the dead_roots list
3399                          */
3400                         spin_lock(&fs_info->trans_lock);
3401                         list_for_each_entry(dead_root, &fs_info->dead_roots,
3402                                             root_list) {
3403                                 if (dead_root->root_key.objectid ==
3404                                     found_key.objectid) {
3405                                         is_dead_root = 1;
3406                                         break;
3407                                 }
3408                         }
3409                         spin_unlock(&fs_info->trans_lock);
3410                         if (is_dead_root) {
3411                                 /* prevent this orphan from being found again */
3412                                 key.offset = found_key.objectid - 1;
3413                                 continue;
3414                         }
3415                 }
3416                 /*
3417                  * Inode is already gone but the orphan item is still there,
3418                  * kill the orphan item.
3419                  */
3420                 if (ret == -ESTALE) {
3421                         trans = btrfs_start_transaction(root, 1);
3422                         if (IS_ERR(trans)) {
3423                                 ret = PTR_ERR(trans);
3424                                 goto out;
3425                         }
3426                         btrfs_debug(root->fs_info, "auto deleting %Lu",
3427                                 found_key.objectid);
3428                         ret = btrfs_del_orphan_item(trans, root,
3429                                                     found_key.objectid);
3430                         btrfs_end_transaction(trans, root);
3431                         if (ret)
3432                                 goto out;
3433                         continue;
3434                 }
3435
3436                 /*
3437                  * add this inode to the orphan list so btrfs_orphan_del does
3438                  * the proper thing when we hit it
3439                  */
3440                 set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3441                         &BTRFS_I(inode)->runtime_flags);
3442                 atomic_inc(&root->orphan_inodes);
3443
3444                 /* if we have links, this was a truncate, let's do that */
3445                 if (inode->i_nlink) {
3446                         if (WARN_ON(!S_ISREG(inode->i_mode))) {
3447                                 iput(inode);
3448                                 continue;
3449                         }
3450                         nr_truncate++;
3451
3452                         /* 1 for the orphan item deletion. */
3453                         trans = btrfs_start_transaction(root, 1);
3454                         if (IS_ERR(trans)) {
3455                                 iput(inode);
3456                                 ret = PTR_ERR(trans);
3457                                 goto out;
3458                         }
3459                         ret = btrfs_orphan_add(trans, inode);
3460                         btrfs_end_transaction(trans, root);
3461                         if (ret) {
3462                                 iput(inode);
3463                                 goto out;
3464                         }
3465
3466                         ret = btrfs_truncate(inode);
3467                         if (ret)
3468                                 btrfs_orphan_del(NULL, inode);
3469                 } else {
3470                         nr_unlink++;
3471                 }
3472
3473                 /* this will do delete_inode and everything for us */
3474                 iput(inode);
3475                 if (ret)
3476                         goto out;
3477         }
3478         /* release the path since we're done with it */
3479         btrfs_release_path(path);
3480
3481         root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
3482
3483         if (root->orphan_block_rsv)
3484                 btrfs_block_rsv_release(root, root->orphan_block_rsv,
3485                                         (u64)-1);
3486
3487         if (root->orphan_block_rsv ||
3488             test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
3489                 trans = btrfs_join_transaction(root);
3490                 if (!IS_ERR(trans))
3491                         btrfs_end_transaction(trans, root);
3492         }
3493
3494         if (nr_unlink)
3495                 btrfs_debug(root->fs_info, "unlinked %d orphans", nr_unlink);
3496         if (nr_truncate)
3497                 btrfs_debug(root->fs_info, "truncated %d orphans", nr_truncate);
3498
3499 out:
3500         if (ret)
3501                 btrfs_err(root->fs_info,
3502                         "could not do orphan cleanup %d", ret);
3503         btrfs_free_path(path);
3504         return ret;
3505 }
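
/*
 * Editor's note on the loop above: every iteration re-searches from
 * (BTRFS_ORPHAN_OBJECTID, BTRFS_ORPHAN_ITEM_KEY, (u64)-1) and looks at the
 * slot just before the insertion point, i.e. the orphan item with the
 * highest inode number.  Progress is made because each iteration either
 * removes that item (via iput-driven eviction, truncation, or explicit
 * deletion) or, for snapshot-deletion orphans, steps the search key below
 * it; the last_objectid check turns a failure to delete into -EINVAL
 * instead of an infinite loop.
 */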
3506
3507 /*
3508  * very simple check to peek ahead in the leaf looking for xattrs.  If we
3509  * don't find any xattrs, we know there can't be any acls.
3510  *
3511  * slot is the slot the inode is in, objectid is the objectid of the inode
3512  */
3513 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3514                                           int slot, u64 objectid,
3515                                           int *first_xattr_slot)
3516 {
3517         u32 nritems = btrfs_header_nritems(leaf);
3518         struct btrfs_key found_key;
3519         static u64 xattr_access = 0;
3520         static u64 xattr_default = 0;
3521         int scanned = 0;
3522
3523         if (!xattr_access) {
3524                 xattr_access = btrfs_name_hash(POSIX_ACL_XATTR_ACCESS,
3525                                         strlen(POSIX_ACL_XATTR_ACCESS));
3526                 xattr_default = btrfs_name_hash(POSIX_ACL_XATTR_DEFAULT,
3527                                         strlen(POSIX_ACL_XATTR_DEFAULT));
3528         }
3529
3530         slot++;
3531         *first_xattr_slot = -1;
3532         while (slot < nritems) {
3533                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3534
3535                 /* we found a different objectid, there must not be acls */
3536                 if (found_key.objectid != objectid)
3537                         return 0;
3538
3539                 /* we found an xattr, assume we've got an acl */
3540                 if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
3541                         if (*first_xattr_slot == -1)
3542                                 *first_xattr_slot = slot;
3543                         if (found_key.offset == xattr_access ||
3544                             found_key.offset == xattr_default)
3545                                 return 1;
3546                 }
3547
3548                 /*
3549                  * we found a key greater than an xattr key, there can't
3550                  * be any acls later on
3551                  */
3552                 if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3553                         return 0;
3554
3555                 slot++;
3556                 scanned++;
3557
3558                 /*
3559                  * it goes inode, inode backrefs, xattrs, extents,
3560                  * so if there are a ton of hard links to an inode there can
3561                  * be a lot of backrefs.  Don't waste time searching too hard,
3562                  * this is just an optimization
3563                  */
3564                 if (scanned >= 8)
3565                         break;
3566         }
3567         /* we hit the end of the leaf before we found an xattr or
3568          * something larger than an xattr.  We have to assume the inode
3569          * has acls
3570          */
3571         if (*first_xattr_slot == -1)
3572                 *first_xattr_slot = slot;
3573         return 1;
3574 }
3575
3576 /*
3577  * read an inode from the btree into the in-memory inode
3578  */
3579 static void btrfs_read_locked_inode(struct inode *inode)
3580 {
3581         struct btrfs_path *path;
3582         struct extent_buffer *leaf;
3583         struct btrfs_inode_item *inode_item;
3584         struct btrfs_root *root = BTRFS_I(inode)->root;
3585         struct btrfs_key location;
3586         unsigned long ptr;
3587         int maybe_acls;
3588         u32 rdev;
3589         int ret;
3590         bool filled = false;
3591         int first_xattr_slot;
3592
3593         ret = btrfs_fill_inode(inode, &rdev);
3594         if (!ret)
3595                 filled = true;
3596
3597         path = btrfs_alloc_path();
3598         if (!path)
3599                 goto make_bad;
3600
3601         memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
3602
3603         ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
3604         if (ret)
3605                 goto make_bad;
3606
3607         leaf = path->nodes[0];
3608
3609         if (filled)
3610                 goto cache_index;
3611
3612         inode_item = btrfs_item_ptr(leaf, path->slots[0],
3613                                     struct btrfs_inode_item);
3614         inode->i_mode = btrfs_inode_mode(leaf, inode_item);
3615         set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
3616         i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
3617         i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
3618         btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
3619
3620         inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
3621         inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);
3622
3623         inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
3624         inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);
3625
3626         inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime);
3627         inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);
3628
3629         BTRFS_I(inode)->i_otime.tv_sec =
3630                 btrfs_timespec_sec(leaf, &inode_item->otime);
3631         BTRFS_I(inode)->i_otime.tv_nsec =
3632                 btrfs_timespec_nsec(leaf, &inode_item->otime);
3633
3634         inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
3635         BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
3636         BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3637
3638         inode->i_version = btrfs_inode_sequence(leaf, inode_item);
3639         inode->i_generation = BTRFS_I(inode)->generation;
3640         inode->i_rdev = 0;
3641         rdev = btrfs_inode_rdev(leaf, inode_item);
3642
3643         BTRFS_I(inode)->index_cnt = (u64)-1;
3644         BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
3645
3646 cache_index:
3647         /*
3648          * If we were modified in the current generation and evicted from memory
3649          * and then re-read we need to do a full sync since we don't have any
3650          * idea about which extents were modified before we were evicted from
3651          * cache.
3652          *
3653          * This is required for both inode re-read from disk and delayed inode
3654          * in delayed_nodes_tree.
3655          */
3656         if (BTRFS_I(inode)->last_trans == root->fs_info->generation)
3657                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3658                         &BTRFS_I(inode)->runtime_flags);
3659
3660         /*
3661          * We don't persist the id of the transaction where an unlink operation
3662          * against the inode was last made. So here we assume the inode might
3663          * have been evicted, and therefore the exact value of last_unlink_trans
3664          * lost, and set it to last_trans to avoid metadata inconsistencies
3665          * between the inode and its parent if the inode is fsync'ed and the log
3666          * replayed. For example, in the scenario:
3667          *
3668          * touch mydir/foo
3669          * ln mydir/foo mydir/bar
3670          * sync
3671          * unlink mydir/bar
3672          * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
3673          * xfs_io -c fsync mydir/foo
3674          * <power failure>
3675          * mount fs, triggers fsync log replay
3676          *
3677          * We must make sure that when we fsync our inode foo we also log its
3678          * parent inode, otherwise after log replay the parent still has the
3679          * dentry with the "bar" name but our inode foo has a link count of 1
3680          * and doesn't have an inode ref with the name "bar" anymore.
3681          *
3682          * Setting last_unlink_trans to last_trans is a pessimistic approach,
3683          * but it guarantees correctness at the expense of occasional full
3684          * transaction commits on fsync if our inode is a directory, or if our
3685          * inode is not a directory, logging its parent unnecessarily.
3686          */
3687         BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
3688
3689         path->slots[0]++;
3690         if (inode->i_nlink != 1 ||
3691             path->slots[0] >= btrfs_header_nritems(leaf))
3692                 goto cache_acl;
3693
3694         btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
3695         if (location.objectid != btrfs_ino(inode))
3696                 goto cache_acl;
3697
3698         ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3699         if (location.type == BTRFS_INODE_REF_KEY) {
3700                 struct btrfs_inode_ref *ref;
3701
3702                 ref = (struct btrfs_inode_ref *)ptr;
3703                 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
3704         } else if (location.type == BTRFS_INODE_EXTREF_KEY) {
3705                 struct btrfs_inode_extref *extref;
3706
3707                 extref = (struct btrfs_inode_extref *)ptr;
3708                 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
3709                                                                      extref);
3710         }
3711 cache_acl:
3712         /*
3713          * try to precache a NULL acl entry for files that don't have
3714          * any xattrs or acls
3715          */
3716         maybe_acls = acls_after_inode_item(leaf, path->slots[0],
3717                                            btrfs_ino(inode), &first_xattr_slot);
3718         if (first_xattr_slot != -1) {
3719                 path->slots[0] = first_xattr_slot;
3720                 ret = btrfs_load_inode_props(inode, path);
3721                 if (ret)
3722                         btrfs_err(root->fs_info,
3723                                   "error loading props for ino %llu (root %llu): %d",
3724                                   btrfs_ino(inode),
3725                                   root->root_key.objectid, ret);
3726         }
3727         btrfs_free_path(path);
3728
3729         if (!maybe_acls)
3730                 cache_no_acl(inode);
3731
3732         switch (inode->i_mode & S_IFMT) {
3733         case S_IFREG:
3734                 inode->i_mapping->a_ops = &btrfs_aops;
3735                 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3736                 inode->i_fop = &btrfs_file_operations;
3737                 inode->i_op = &btrfs_file_inode_operations;
3738                 break;
3739         case S_IFDIR:
3740                 inode->i_fop = &btrfs_dir_file_operations;
3741                 if (root == root->fs_info->tree_root)
3742                         inode->i_op = &btrfs_dir_ro_inode_operations;
3743                 else
3744                         inode->i_op = &btrfs_dir_inode_operations;
3745                 break;
3746         case S_IFLNK:
3747                 inode->i_op = &btrfs_symlink_inode_operations;
3748                 inode->i_mapping->a_ops = &btrfs_symlink_aops;
3749                 break;
3750         default:
3751                 inode->i_op = &btrfs_special_inode_operations;
3752                 init_special_inode(inode, inode->i_mode, rdev);
3753                 break;
3754         }
3755
3756         btrfs_update_iflags(inode);
3757         return;
3758
3759 make_bad:
3760         btrfs_free_path(path);
3761         make_bad_inode(inode);
3762 }
3763
3764 /*
3765  * given a leaf and an inode, copy the inode fields into the leaf
3766  */
3767 static void fill_inode_item(struct btrfs_trans_handle *trans,
3768                             struct extent_buffer *leaf,
3769                             struct btrfs_inode_item *item,
3770                             struct inode *inode)
3771 {
3772         struct btrfs_map_token token;
3773
3774         btrfs_init_map_token(&token);
3775
3776         btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3777         btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3778         btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size,
3779                                    &token);
3780         btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3781         btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3782
3783         btrfs_set_token_timespec_sec(leaf, &item->atime,
3784                                      inode->i_atime.tv_sec, &token);
3785         btrfs_set_token_timespec_nsec(leaf, &item->atime,
3786                                       inode->i_atime.tv_nsec, &token);
3787
3788         btrfs_set_token_timespec_sec(leaf, &item->mtime,
3789                                      inode->i_mtime.tv_sec, &token);
3790         btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3791                                       inode->i_mtime.tv_nsec, &token);
3792
3793         btrfs_set_token_timespec_sec(leaf, &item->ctime,
3794                                      inode->i_ctime.tv_sec, &token);
3795         btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3796                                       inode->i_ctime.tv_nsec, &token);
3797
3798         btrfs_set_token_timespec_sec(leaf, &item->otime,
3799                                      BTRFS_I(inode)->i_otime.tv_sec, &token);
3800         btrfs_set_token_timespec_nsec(leaf, &item->otime,
3801                                       BTRFS_I(inode)->i_otime.tv_nsec, &token);
3802
3803         btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3804                                      &token);
3805         btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
3806                                          &token);
3807         btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
3808         btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3809         btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3810         btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3811         btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3812 }
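
/*
 * Editor's note: the btrfs_map_token used above caches the last mapped
 * area of the extent buffer, so the long run of btrfs_set_token_*() calls
 * against the same inode item avoids re-deriving the mapping for every
 * field; the behaviour is identical to the non-token setters.
 */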
3813
3814 /*
3815  * copy everything in the in-memory inode into the btree.
3816  */
3817 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
3818                                 struct btrfs_root *root, struct inode *inode)
3819 {
3820         struct btrfs_inode_item *inode_item;
3821         struct btrfs_path *path;
3822         struct extent_buffer *leaf;
3823         int ret;
3824
3825         path = btrfs_alloc_path();
3826         if (!path)
3827                 return -ENOMEM;
3828
3829         path->leave_spinning = 1;
3830         ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
3831                                  1);
3832         if (ret) {
3833                 if (ret > 0)
3834                         ret = -ENOENT;
3835                 goto failed;
3836         }
3837
3838         leaf = path->nodes[0];
3839         inode_item = btrfs_item_ptr(leaf, path->slots[0],
3840                                     struct btrfs_inode_item);
3841
3842         fill_inode_item(trans, leaf, inode_item, inode);
3843         btrfs_mark_buffer_dirty(leaf);
3844         btrfs_set_inode_last_trans(trans, inode);
3845         ret = 0;
3846 failed:
3847         btrfs_free_path(path);
3848         return ret;
3849 }
3850
3851 /*
3852  * copy everything in the in-memory inode into the btree.
3853  */
3854 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
3855                                 struct btrfs_root *root, struct inode *inode)
3856 {
3857         int ret;
3858
3859         /*
3860          * If the inode is a free space inode, we can deadlock during commit
3861          * if we put it into the delayed code.
3862          *
3863          * The data relocation inode should also be directly updated
3864          * without delay.
3865          */
3866         if (!btrfs_is_free_space_inode(inode)
3867             && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
3868             && !root->fs_info->log_root_recovering) {
3869                 btrfs_update_root_times(trans, root);
3870
3871                 ret = btrfs_delayed_update_inode(trans, root, inode);
3872                 if (!ret)
3873                         btrfs_set_inode_last_trans(trans, inode);
3874                 return ret;
3875         }
3876
3877         return btrfs_update_inode_item(trans, root, inode);
3878 }
3879
3880 noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
3881                                          struct btrfs_root *root,
3882                                          struct inode *inode)
3883 {
3884         int ret;
3885
3886         ret = btrfs_update_inode(trans, root, inode);
3887         if (ret == -ENOSPC)
3888                 return btrfs_update_inode_item(trans, root, inode);
3889         return ret;
3890 }
3891
3892 /*
3893  * unlink helper that gets used here in inode.c and in the tree logging
3894  * recovery code.  It removes a link in a directory with a given name, and
3895  * also drops the back refs from the inode to the directory
3896  */
3897 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3898                                 struct btrfs_root *root,
3899                                 struct inode *dir, struct inode *inode,
3900                                 const char *name, int name_len)
3901 {
3902         struct btrfs_path *path;
3903         int ret = 0;
3904         struct extent_buffer *leaf;
3905         struct btrfs_dir_item *di;
3906         struct btrfs_key key;
3907         u64 index;
3908         u64 ino = btrfs_ino(inode);
3909         u64 dir_ino = btrfs_ino(dir);
3910
3911         path = btrfs_alloc_path();
3912         if (!path) {
3913                 ret = -ENOMEM;
3914                 goto out;
3915         }
3916
3917         path->leave_spinning = 1;
3918         di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3919                                     name, name_len, -1);
3920         if (IS_ERR(di)) {
3921                 ret = PTR_ERR(di);
3922                 goto err;
3923         }
3924         if (!di) {
3925                 ret = -ENOENT;
3926                 goto err;
3927         }
3928         leaf = path->nodes[0];
3929         btrfs_dir_item_key_to_cpu(leaf, di, &key);
3930         ret = btrfs_delete_one_dir_name(trans, root, path, di);
3931         if (ret)
3932                 goto err;
3933         btrfs_release_path(path);
3934
3935         /*
3936          * If we don't have a dir index, we have to get it by looking up
3937          * the inode ref; and since we then hold the inode ref anyway, we
3938          * remove it directly rather than going through delayed deletion.
3939          *
3940          * But if we do have a dir index, there is no need to search the
3941          * inode ref for it.  Since the inode ref is close to the inode
3942          * item, it is better to delay its deletion and do it when we
3943          * update the inode item.
3944          */
3945         if (BTRFS_I(inode)->dir_index) {
3946                 ret = btrfs_delayed_delete_inode_ref(inode);
3947                 if (!ret) {
3948                         index = BTRFS_I(inode)->dir_index;
3949                         goto skip_backref;
3950                 }
3951         }
3952
3953         ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
3954                                   dir_ino, &index);
3955         if (ret) {
3956                 btrfs_info(root->fs_info,
3957                         "failed to delete reference to %.*s, inode %llu parent %llu",
3958                         name_len, name, ino, dir_ino);
3959                 btrfs_abort_transaction(trans, root, ret);
3960                 goto err;
3961         }
3962 skip_backref:
3963         ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
3964         if (ret) {
3965                 btrfs_abort_transaction(trans, root, ret);
3966                 goto err;
3967         }
3968
3969         ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
3970                                          inode, dir_ino);
3971         if (ret != 0 && ret != -ENOENT) {
3972                 btrfs_abort_transaction(trans, root, ret);
3973                 goto err;
3974         }
3975
3976         ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
3977                                            dir, index);
3978         if (ret == -ENOENT)
3979                 ret = 0;
3980         else if (ret)
3981                 btrfs_abort_transaction(trans, root, ret);
3982 err:
3983         btrfs_free_path(path);
3984         if (ret)
3985                 goto out;
3986
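        /*
         * The directory size counts each name twice: once for the dir
         * item and once for the dir index item, hence the "* 2" below.
         */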
3987         btrfs_i_size_write(dir, dir->i_size - name_len * 2);
3988         inode_inc_iversion(inode);
3989         inode_inc_iversion(dir);
3990         inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
3991         ret = btrfs_update_inode(trans, root, dir);
3992 out:
3993         return ret;
3994 }
3995
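/*
 * Wrapper around __btrfs_unlink_inode() that also drops the victim
 * inode's link count and writes the updated inode item back.
 */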
3996 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3997                        struct btrfs_root *root,
3998                        struct inode *dir, struct inode *inode,
3999                        const char *name, int name_len)
4000 {
4001         int ret;
4002         ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
4003         if (!ret) {
4004                 drop_nlink(inode);
4005                 ret = btrfs_update_inode(trans, root, inode);
4006         }
4007         return ret;
4008 }
4009
4010 /*
4011  * helper to start transaction for unlink and rmdir.
4012  *
4013  * unlink and rmdir are special in btrfs: they do not always free space, so
4014  * if we cannot make our reservation the normal way, try to see if there is
4015  * enough slack room in the global reserve to migrate; otherwise we cannot
4016  * allow the unlink to occur.
4017  */
4018 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
4019 {
4020         struct btrfs_trans_handle *trans;
4021         struct btrfs_root *root = BTRFS_I(dir)->root;
4022         u64 num_bytes;
4023         int ret;
4024
4025         /*
4026          * 1 for the possible orphan item
4027          * 1 for the dir item
4028          * 1 for the dir index
4029          * 1 for the inode ref
4030          * 1 for the inode
4031          */
4032         trans = btrfs_start_transaction(root, 5);
4033         if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
4034                 return trans;
4035
4036         /* -ENOSPC: retry unreserved and migrate from the global reserve. */
4037         num_bytes = btrfs_calc_trans_metadata_size(root, 5);
4038         trans = btrfs_start_transaction(root, 0);
4039         if (IS_ERR(trans))
4040                 return trans;
4041         ret = btrfs_cond_migrate_bytes(root->fs_info,
4042                                        &root->fs_info->trans_block_rsv,
4043                                        num_bytes, 5);
4044         if (ret) {
4045                 btrfs_end_transaction(trans, root);
4046                 return ERR_PTR(ret);
4047         }
4048         trans->block_rsv = &root->fs_info->trans_block_rsv;
4049         trans->bytes_reserved = num_bytes;
4050
4051         return trans;
4052 }
4053
4054 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
4055 {
4056         struct btrfs_root *root = BTRFS_I(dir)->root;
4057         struct btrfs_trans_handle *trans;
4058         struct inode *inode = d_inode(dentry);
4059         int ret;
4060
4061         trans = __unlink_start_trans(dir);
4062         if (IS_ERR(trans))
4063                 return PTR_ERR(trans);
4064
4065         btrfs_record_unlink_dir(trans, dir, d_inode(dentry), 0);
4066
4067         ret = btrfs_unlink_inode(trans, root, dir, d_inode(dentry),
4068                                  dentry->d_name.name, dentry->d_name.len);
4069         if (ret)
4070                 goto out;
4071
4072         if (inode->i_nlink == 0) {
4073                 ret = btrfs_orphan_add(trans, inode);
4074                 if (ret)
4075                         goto out;
4076         }
4077
4078 out:
4079         btrfs_end_transaction(trans, root);
4080         btrfs_btree_balance_dirty(root);
4081         return ret;
4082 }
4083
4084 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
4085                         struct btrfs_root *root,
4086                         struct inode *dir, u64 objectid,
4087                         const char *name, int name_len)
4088 {
4089         struct btrfs_path *path;
4090         struct extent_buffer *leaf;
4091         struct btrfs_dir_item *di;
4092         struct btrfs_key key;
4093         u64 index;
4094         int ret;
4095         u64 dir_ino = btrfs_ino(dir);
4096
4097         path = btrfs_alloc_path();
4098         if (!path)
4099                 return -ENOMEM;
4100
4101         di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4102                                    name, name_len, -1);
4103         if (IS_ERR_OR_NULL(di)) {
4104                 if (!di)
4105                         ret = -ENOENT;
4106                 else
4107                         ret = PTR_ERR(di);
4108                 goto out;
4109         }
4110
4111         leaf = path->nodes[0];
4112         btrfs_dir_item_key_to_cpu(leaf, di, &key);
4113         WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
4114         ret = btrfs_delete_one_dir_name(trans, root, path, di);
4115         if (ret) {
4116                 btrfs_abort_transaction(trans, root, ret);
4117                 goto out;
4118         }
4119         btrfs_release_path(path);
4120
4121         ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
4122                                  objectid, root->root_key.objectid,
4123                                  dir_ino, &index, name, name_len);
4124         if (ret < 0) {
4125                 if (ret != -ENOENT) {
4126                         btrfs_abort_transaction(trans, root, ret);
4127                         goto out;
4128                 }
4129                 di = btrfs_search_dir_index_item(root, path, dir_ino,
4130                                                  name, name_len);
4131                 if (IS_ERR_OR_NULL(di)) {
4132                         if (!di)
4133                                 ret = -ENOENT;
4134                         else
4135                                 ret = PTR_ERR(di);
4136                         btrfs_abort_transaction(trans, root, ret);
4137                         goto out;
4138                 }
4139
4140                 leaf = path->nodes[0];
4141                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4142                 btrfs_release_path(path);
4143                 index = key.offset;
4144         }
4145         btrfs_release_path(path);
4146
4147         ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
4148         if (ret) {
4149                 btrfs_abort_transaction(trans, root, ret);
4150                 goto out;
4151         }
4152
4153         btrfs_i_size_write(dir, dir->i_size - name_len * 2);
4154         inode_inc_iversion(dir);
4155         dir->i_mtime = dir->i_ctime = CURRENT_TIME;
4156         ret = btrfs_update_inode_fallback(trans, root, dir);
4157         if (ret)
4158                 btrfs_abort_transaction(trans, root, ret);
4159 out:
4160         btrfs_free_path(path);
4161         return ret;
4162 }
4163
4164 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
4165 {
4166         struct inode *inode = d_inode(dentry);
4167         int err = 0;
4168         struct btrfs_root *root = BTRFS_I(dir)->root;
4169         struct btrfs_trans_handle *trans;
4170
4171         if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
4172                 return -ENOTEMPTY;
4173         if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
4174                 return -EPERM;
4175
4176         trans = __unlink_start_trans(dir);
4177         if (IS_ERR(trans))
4178                 return PTR_ERR(trans);
4179
4180         if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
4181                 err = btrfs_unlink_subvol(trans, root, dir,
4182                                           BTRFS_I(inode)->location.objectid,
4183                                           dentry->d_name.name,
4184                                           dentry->d_name.len);
4185                 goto out;
4186         }
4187
4188         err = btrfs_orphan_add(trans, inode);
4189         if (err)
4190                 goto out;
4191
4192         /* now the directory is empty */
4193         err = btrfs_unlink_inode(trans, root, dir, d_inode(dentry),
4194                                  dentry->d_name.name, dentry->d_name.len);
4195         if (!err)
4196                 btrfs_i_size_write(inode, 0);
4197 out:
4198         btrfs_end_transaction(trans, root);
4199         btrfs_btree_balance_dirty(root);
4200
4201         return err;
4202 }
4203
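/*
 * Reserve enough space in the transaction block reserve to cover the
 * csum tree leaves touched when @bytes_deleted bytes of extents go away.
 * The truncate loop below treats a failure here as a signal to end the
 * transaction and restart.
 */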
4204 static int truncate_space_check(struct btrfs_trans_handle *trans,
4205                                 struct btrfs_root *root,
4206                                 u64 bytes_deleted)
4207 {
4208         int ret;
4209
4210         bytes_deleted = btrfs_csum_bytes_to_leaves(root, bytes_deleted);
4211         ret = btrfs_block_rsv_add(root, &root->fs_info->trans_block_rsv,
4212                                   bytes_deleted, BTRFS_RESERVE_NO_FLUSH);
4213         if (!ret)
4214                 trans->bytes_reserved += bytes_deleted;
4215         return ret;
4217 }
4218
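/*
 * Shrink an inline file extent item down to @new_size. Compressed inline
 * extents are handled by zeroing the tail of their page instead, since
 * truncating the compressed data in place would need a full decompress
 * and recompress cycle.
 */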
4219 static int truncate_inline_extent(struct inode *inode,
4220                                   struct btrfs_path *path,
4221                                   struct btrfs_key *found_key,
4222                                   const u64 item_end,
4223                                   const u64 new_size)
4224 {
4225         struct extent_buffer *leaf = path->nodes[0];
4226         int slot = path->slots[0];
4227         struct btrfs_file_extent_item *fi;
4228         u32 size = (u32)(new_size - found_key->offset);
4229         struct btrfs_root *root = BTRFS_I(inode)->root;
4230
4231         fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
4232
4233         if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) {
4234                 loff_t offset = new_size;
4235                 loff_t page_end = ALIGN(offset, PAGE_CACHE_SIZE);
4236
4237                 /*
4238                  * Zero out the remainder of the last page of our inline extent,
4239                  * instead of directly truncating our inline extent here - that
4240                  * would be much more complex (decompressing all the data, then
4241                  * compressing the truncated data, which might be bigger than
4242                  * the size of the inline extent, resizing the extent, etc.).
4243                  * We release the path because to get the page we might need to
4244                  * read the extent item from disk (data not in the page cache).
4245                  */
4246                 btrfs_release_path(path);
4247                 return btrfs_truncate_page(inode, offset, page_end - offset, 0);
4248         }
4249
4250         btrfs_set_file_extent_ram_bytes(leaf, fi, size);
4251         size = btrfs_file_extent_calc_inline_size(size);
4252         btrfs_truncate_item(root, path, size, 1);
4253
4254         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4255                 inode_sub_bytes(inode, item_end + 1 - new_size);
4256
4257         return 0;
4258 }
4259
4260 /*
4261  * this can truncate away extent items, csum items and directory items.
4262  * It starts at a high offset and removes keys until it can't find
4263  * any higher than new_size
4264  *
4265  * csum items that cross the new i_size are truncated to the new size
4266  * as well.
4267  *
4268  * min_type is the minimum key type to truncate down to.  If set to 0, this
4269  * will kill all the items on this inode, including the INODE_ITEM_KEY.
4270  */
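/*
 * For example, btrfs_evict_inode() below removes everything with
 *
 *	btrfs_truncate_inode_items(trans, root, inode, 0, 0);
 *
 * while a regular truncate (see btrfs_truncate() elsewhere in this file)
 * passes the new i_size and BTRFS_EXTENT_DATA_KEY as min_type so the
 * inode item itself survives.
 */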
4271 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
4272                                struct btrfs_root *root,
4273                                struct inode *inode,
4274                                u64 new_size, u32 min_type)
4275 {
4276         struct btrfs_path *path;
4277         struct extent_buffer *leaf;
4278         struct btrfs_file_extent_item *fi;
4279         struct btrfs_key key;
4280         struct btrfs_key found_key;
4281         u64 extent_start = 0;
4282         u64 extent_num_bytes = 0;
4283         u64 extent_offset = 0;
4284         u64 item_end = 0;
4285         u64 last_size = new_size;
4286         u32 found_type = (u8)-1;
4287         int found_extent;
4288         int del_item;
4289         int pending_del_nr = 0;
4290         int pending_del_slot = 0;
4291         int extent_type = -1;
4292         int ret;
4293         int err = 0;
4294         u64 ino = btrfs_ino(inode);
4295         u64 bytes_deleted = 0;
4296         bool be_nice = 0;
4297         bool should_throttle = 0;
4298         bool should_end = 0;
4299
4300         BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
4301
4302         /*
4303          * for non-free space inodes and ref cows, we want to back off from
4304          * time to time
4305          */
4306         if (!btrfs_is_free_space_inode(inode) &&
4307             test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4308                 be_nice = 1;
4309
4310         path = btrfs_alloc_path();
4311         if (!path)
4312                 return -ENOMEM;
4313         path->reada = -1;
4314
4315         /*
4316          * We want to drop from the next block forward in case this new size is
4317          * not block aligned since we will be keeping the last block of the
4318          * extent just the way it is.
4319          */
4320         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4321             root == root->fs_info->tree_root)
4322                 btrfs_drop_extent_cache(inode, ALIGN(new_size,
4323                                         root->sectorsize), (u64)-1, 0);
4324
4325         /*
4326          * This function is also used to drop the items in the log tree before
4327          * we relog the inode, so if root != BTRFS_I(inode)->root, it means
4328          * we are dropping the logged items, and we shouldn't kill the
4329          * delayed items.
4330          */
4331         if (min_type == 0 && root == BTRFS_I(inode)->root)
4332                 btrfs_kill_delayed_inode_items(inode);
4333
4334         key.objectid = ino;
4335         key.offset = (u64)-1;
4336         key.type = (u8)-1;
4337
4338 search_again:
4339         /*
4340          * with a 16K leaf size and 128MB extents, you can actually queue
4341          * up a huge file in a single leaf.  Most of the time when
4342          * bytes_deleted is > 0, it will be huge by the time we get here.
4343          */
4344         if (be_nice && bytes_deleted > 32 * 1024 * 1024) {
4345                 if (btrfs_should_end_transaction(trans, root)) {
4346                         err = -EAGAIN;
4347                         goto error;
4348                 }
4349         }
4350
4352         path->leave_spinning = 1;
4353         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
4354         if (ret < 0) {
4355                 err = ret;
4356                 goto out;
4357         }
4358
4359         if (ret > 0) {
4360                 /* there are no items in the tree for us to truncate, we're
4361                  * done
4362                  */
4363                 if (path->slots[0] == 0)
4364                         goto out;
4365                 path->slots[0]--;
4366         }
4367
4368         while (1) {
4369                 fi = NULL;
4370                 leaf = path->nodes[0];
4371                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4372                 found_type = found_key.type;
4373
4374                 if (found_key.objectid != ino)
4375                         break;
4376
4377                 if (found_type < min_type)
4378                         break;
4379
4380                 item_end = found_key.offset;
4381                 if (found_type == BTRFS_EXTENT_DATA_KEY) {
4382                         fi = btrfs_item_ptr(leaf, path->slots[0],
4383                                             struct btrfs_file_extent_item);
4384                         extent_type = btrfs_file_extent_type(leaf, fi);
4385                         if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4386                                 item_end +=
4387                                     btrfs_file_extent_num_bytes(leaf, fi);
4388                         } else {
4389                                 item_end += btrfs_file_extent_inline_len(leaf,
4390                                                          path->slots[0], fi);
4391                         }
4392                         item_end--;
4393                 }
4394                 if (found_type > min_type) {
4395                         del_item = 1;
4396                 } else {
4397                         if (item_end < new_size)
4398                                 break;
4399                         if (found_key.offset >= new_size)
4400                                 del_item = 1;
4401                         else
4402                                 del_item = 0;
4403                 }
4404                 found_extent = 0;
4405                 /* FIXME, shrink the extent if the ref count is only 1 */
4406                 if (found_type != BTRFS_EXTENT_DATA_KEY)
4407                         goto delete;
4408
4409                 if (del_item)
4410                         last_size = found_key.offset;
4411                 else
4412                         last_size = new_size;
4413
4414                 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4415                         u64 num_dec;
4416                         extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
4417                         if (!del_item) {
4418                                 u64 orig_num_bytes =
4419                                         btrfs_file_extent_num_bytes(leaf, fi);
4420                                 extent_num_bytes = ALIGN(new_size -
4421                                                 found_key.offset,
4422                                                 root->sectorsize);
4423                                 btrfs_set_file_extent_num_bytes(leaf, fi,
4424                                                          extent_num_bytes);
4425                                 num_dec = (orig_num_bytes -
4426                                            extent_num_bytes);
4427                                 if (test_bit(BTRFS_ROOT_REF_COWS,
4428                                              &root->state) &&
4429                                     extent_start != 0)
4430                                         inode_sub_bytes(inode, num_dec);
4431                                 btrfs_mark_buffer_dirty(leaf);
4432                         } else {
4433                                 extent_num_bytes =
4434                                         btrfs_file_extent_disk_num_bytes(leaf,
4435                                                                          fi);
4436                                 extent_offset = found_key.offset -
4437                                         btrfs_file_extent_offset(leaf, fi);
4438
4439                                 /* FIXME blocksize != 4096 */
4440                                 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
4441                                 if (extent_start != 0) {
4442                                         found_extent = 1;
4443                                         if (test_bit(BTRFS_ROOT_REF_COWS,
4444                                                      &root->state))
4445                                                 inode_sub_bytes(inode, num_dec);
4446                                 }
4447                         }
4448                 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
4449                         /*
4450                          * we can't truncate inline items that have had
4451                          * special encodings
4452                          */
4453                         if (!del_item &&
4454                             btrfs_file_extent_encryption(leaf, fi) == 0 &&
4455                             btrfs_file_extent_other_encoding(leaf, fi) == 0) {
4456
4457                                 /*
4458                                  * Need to release path in order to truncate a
4459                                  * compressed extent. So delete any accumulated
4460                                  * extent items so far.
4461                                  */
4462                                 if (btrfs_file_extent_compression(leaf, fi) !=
4463                                     BTRFS_COMPRESS_NONE && pending_del_nr) {
4464                                         err = btrfs_del_items(trans, root, path,
4465                                                               pending_del_slot,
4466                                                               pending_del_nr);
4467                                         if (err) {
4468                                                 btrfs_abort_transaction(trans,
4469                                                                         root,
4470                                                                         err);
4471                                                 goto error;
4472                                         }
4473                                         pending_del_nr = 0;
4474                                 }
4475
4476                                 err = truncate_inline_extent(inode, path,
4477                                                              &found_key,
4478                                                              item_end,
4479                                                              new_size);
4480                                 if (err) {
4481                                         btrfs_abort_transaction(trans,
4482                                                                 root, err);
4483                                         goto error;
4484                                 }
4485                         } else if (test_bit(BTRFS_ROOT_REF_COWS,
4486                                             &root->state)) {
4487                                 inode_sub_bytes(inode, item_end + 1 - new_size);
4488                         }
4489                 }
4490 delete:
4491                 if (del_item) {
4492                         if (!pending_del_nr) {
4493                                 /* no pending yet, add ourselves */
4494                                 pending_del_slot = path->slots[0];
4495                                 pending_del_nr = 1;
4496                         } else if (pending_del_nr &&
4497                                    path->slots[0] + 1 == pending_del_slot) {
4498                                 /* hop on the pending chunk */
4499                                 pending_del_nr++;
4500                                 pending_del_slot = path->slots[0];
4501                         } else {
4502                                 BUG();
4503                         }
4504                 } else {
4505                         break;
4506                 }
4507                 should_throttle = 0;
4508
4509                 if (found_extent &&
4510                     (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4511                      root == root->fs_info->tree_root)) {
4512                         btrfs_set_path_blocking(path);
4513                         bytes_deleted += extent_num_bytes;
4514                         ret = btrfs_free_extent(trans, root, extent_start,
4515                                                 extent_num_bytes, 0,
4516                                                 btrfs_header_owner(leaf),
4517                                                 ino, extent_offset, 0);
4518                         BUG_ON(ret);
4519                         if (btrfs_should_throttle_delayed_refs(trans, root))
4520                                 btrfs_async_run_delayed_refs(root,
4521                                         trans->delayed_ref_updates * 2, 0);
4522                         if (be_nice) {
4523                                 if (truncate_space_check(trans, root,
4524                                                          extent_num_bytes)) {
4525                                         should_end = 1;
4526                                 }
4527                                 if (btrfs_should_throttle_delayed_refs(trans,
4528                                                                        root)) {
4529                                         should_throttle = 1;
4530                                 }
4531                         }
4532                 }
4533
4534                 if (found_type == BTRFS_INODE_ITEM_KEY)
4535                         break;
4536
4537                 if (path->slots[0] == 0 ||
4538                     path->slots[0] != pending_del_slot ||
4539                     should_throttle || should_end) {
4540                         if (pending_del_nr) {
4541                                 ret = btrfs_del_items(trans, root, path,
4542                                                 pending_del_slot,
4543                                                 pending_del_nr);
4544                                 if (ret) {
4545                                         btrfs_abort_transaction(trans,
4546                                                                 root, ret);
4547                                         goto error;
4548                                 }
4549                                 pending_del_nr = 0;
4550                         }
4551                         btrfs_release_path(path);
4552                         if (should_throttle) {
4553                                 unsigned long updates = trans->delayed_ref_updates;
4554                                 if (updates) {
4555                                         trans->delayed_ref_updates = 0;
4556                                         ret = btrfs_run_delayed_refs(trans, root, updates * 2);
4557                                         if (ret && !err)
4558                                                 err = ret;
4559                                 }
4560                         }
4561                         /*
4562                          * if we failed to refill our space rsv, bail out
4563                          * and let the transaction restart
4564                          */
4565                         if (should_end) {
4566                                 err = -EAGAIN;
4567                                 goto error;
4568                         }
4569                         goto search_again;
4570                 } else {
4571                         path->slots[0]--;
4572                 }
4573         }
4574 out:
4575         if (pending_del_nr) {
4576                 ret = btrfs_del_items(trans, root, path, pending_del_slot,
4577                                       pending_del_nr);
4578                 if (ret)
4579                         btrfs_abort_transaction(trans, root, ret);
4580         }
4581 error:
4582         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
4583                 btrfs_ordered_update_i_size(inode, last_size, NULL);
4584
4585         btrfs_free_path(path);
4586
4587         if (be_nice && bytes_deleted > 32 * 1024 * 1024) {
4588                 unsigned long updates = trans->delayed_ref_updates;
4589                 if (updates) {
4590                         trans->delayed_ref_updates = 0;
4591                         ret = btrfs_run_delayed_refs(trans, root, updates * 2);
4592                         if (ret && !err)
4593                                 err = ret;
4594                 }
4595         }
4596         return err;
4597 }
4598
4599 /*
4600  * btrfs_truncate_page - read, zero a chunk and write a page
4601  * @inode - inode that we're zeroing
4602  * @from - the offset to start zeroing
4603  * @len - the length to zero, 0 to zero the entire range relative to the
4604  *      offset
4605  * @front - zero up to the offset instead of from the offset on
4606  *
4607  * This will find the page for the "from" offset and cow the page and zero the
4608  * part we want to zero.  This is used with truncate and hole punching.
4609  */
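/*
 * Both uses appear in this file: btrfs_cont_expand() zeroes the tail of
 * the old last page with btrfs_truncate_page(inode, oldsize, 0, 0), and
 * truncate_inline_extent() zeroes from the new size to the page end with
 * btrfs_truncate_page(inode, offset, page_end - offset, 0).
 */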
4610 int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
4611                         int front)
4612 {
4613         struct address_space *mapping = inode->i_mapping;
4614         struct btrfs_root *root = BTRFS_I(inode)->root;
4615         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4616         struct btrfs_ordered_extent *ordered;
4617         struct extent_state *cached_state = NULL;
4618         char *kaddr;
4619         u32 blocksize = root->sectorsize;
4620         pgoff_t index = from >> PAGE_CACHE_SHIFT;
4621         unsigned offset = from & (PAGE_CACHE_SIZE-1);
4622         struct page *page;
4623         gfp_t mask = btrfs_alloc_write_mask(mapping);
4624         int ret = 0;
4625         u64 page_start;
4626         u64 page_end;
4627
4628         if ((offset & (blocksize - 1)) == 0 &&
4629             (!len || ((len & (blocksize - 1)) == 0)))
4630                 goto out;
4631         ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
4632         if (ret)
4633                 goto out;
4634
4635 again:
4636         page = find_or_create_page(mapping, index, mask);
4637         if (!page) {
4638                 btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
4639                 ret = -ENOMEM;
4640                 goto out;
4641         }
4642
4643         page_start = page_offset(page);
4644         page_end = page_start + PAGE_CACHE_SIZE - 1;
4645
4646         if (!PageUptodate(page)) {
4647                 ret = btrfs_readpage(NULL, page);
4648                 lock_page(page);
4649                 if (page->mapping != mapping) {
4650                         unlock_page(page);
4651                         page_cache_release(page);
4652                         goto again;
4653                 }
4654                 if (!PageUptodate(page)) {
4655                         ret = -EIO;
4656                         goto out_unlock;
4657                 }
4658         }
4659         wait_on_page_writeback(page);
4660
4661         lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
4662         set_page_extent_mapped(page);
4663
4664         ordered = btrfs_lookup_ordered_extent(inode, page_start);
4665         if (ordered) {
4666                 unlock_extent_cached(io_tree, page_start, page_end,
4667                                      &cached_state, GFP_NOFS);
4668                 unlock_page(page);
4669                 page_cache_release(page);
4670                 btrfs_start_ordered_extent(inode, ordered, 1);
4671                 btrfs_put_ordered_extent(ordered);
4672                 goto again;
4673         }
4674
4675         clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
4676                           EXTENT_DIRTY | EXTENT_DELALLOC |
4677                           EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
4678                           0, 0, &cached_state, GFP_NOFS);
4679
4680         ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
4681                                         &cached_state);
4682         if (ret) {
4683                 unlock_extent_cached(io_tree, page_start, page_end,
4684                                      &cached_state, GFP_NOFS);
4685                 goto out_unlock;
4686         }
4687
4688         if (offset != PAGE_CACHE_SIZE) {
4689                 if (!len)
4690                         len = PAGE_CACHE_SIZE - offset;
4691                 kaddr = kmap(page);
4692                 if (front)
4693                         memset(kaddr, 0, offset);
4694                 else
4695                         memset(kaddr + offset, 0, len);
4696                 flush_dcache_page(page);
4697                 kunmap(page);
4698         }
4699         ClearPageChecked(page);
4700         set_page_dirty(page);
4701         unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
4702                              GFP_NOFS);
4703
4704 out_unlock:
4705         if (ret)
4706                 btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
4707         unlock_page(page);
4708         page_cache_release(page);
4709 out:
4710         return ret;
4711 }
4712
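/*
 * Insert an explicit hole extent covering [offset, offset + len). With
 * the NO_HOLES incompat feature enabled no on-disk item is needed, so
 * only the in-memory logging fields are bumped to keep fsync correct.
 */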
4713 static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
4714                              u64 offset, u64 len)
4715 {
4716         struct btrfs_trans_handle *trans;
4717         int ret;
4718
4719         /*
4720          * Still need to make sure the inode looks like it's been updated so
4721          * that any holes get logged if we fsync.
4722          */
4723         if (btrfs_fs_incompat(root->fs_info, NO_HOLES)) {
4724                 BTRFS_I(inode)->last_trans = root->fs_info->generation;
4725                 BTRFS_I(inode)->last_sub_trans = root->log_transid;
4726                 BTRFS_I(inode)->last_log_commit = root->last_log_commit;
4727                 return 0;
4728         }
4729
4730         /*
4731          * 1 - for the one we're dropping
4732          * 1 - for the one we're adding
4733          * 1 - for updating the inode.
4734          */
4735         trans = btrfs_start_transaction(root, 3);
4736         if (IS_ERR(trans))
4737                 return PTR_ERR(trans);
4738
4739         ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1);
4740         if (ret) {
4741                 btrfs_abort_transaction(trans, root, ret);
4742                 btrfs_end_transaction(trans, root);
4743                 return ret;
4744         }
4745
4746         ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
4747                                        0, 0, len, 0, len, 0, 0, 0);
4748         if (ret)
4749                 btrfs_abort_transaction(trans, root, ret);
4750         else
4751                 btrfs_update_inode(trans, root, inode);
4752         btrfs_end_transaction(trans, root);
4753         return ret;
4754 }
4755
4756 /*
4757  * This function puts in dummy file extents for the area we're creating a hole
4758  * for.  So if we are truncating this file to a larger size we need to insert
4759  * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE
4760  * for the range between oldsize and size.
4761  */
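/*
 * For example, btrfs_setsize() below calls
 *
 *	ret = btrfs_cont_expand(inode, oldsize, newsize);
 *
 * on an expanding truncate before bumping i_size, so the new range reads
 * back as a hole.
 */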
4762 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
4763 {
4764         struct btrfs_root *root = BTRFS_I(inode)->root;
4765         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4766         struct extent_map *em = NULL;
4767         struct extent_state *cached_state = NULL;
4768         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4769         u64 hole_start = ALIGN(oldsize, root->sectorsize);
4770         u64 block_end = ALIGN(size, root->sectorsize);
4771         u64 last_byte;
4772         u64 cur_offset;
4773         u64 hole_size;
4774         int err = 0;
4775
4776         /*
4777          * If our size started in the middle of a page we need to zero out the
4778          * rest of the page before we expand the i_size, otherwise we could
4779          * expose stale data.
4780          */
4781         err = btrfs_truncate_page(inode, oldsize, 0, 0);
4782         if (err)
4783                 return err;
4784
4785         if (size <= hole_start)
4786                 return 0;
4787
4788         while (1) {
4789                 struct btrfs_ordered_extent *ordered;
4790
4791                 lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
4792                                  &cached_state);
4793                 ordered = btrfs_lookup_ordered_range(inode, hole_start,
4794                                                      block_end - hole_start);
4795                 if (!ordered)
4796                         break;
4797                 unlock_extent_cached(io_tree, hole_start, block_end - 1,
4798                                      &cached_state, GFP_NOFS);
4799                 btrfs_start_ordered_extent(inode, ordered, 1);
4800                 btrfs_put_ordered_extent(ordered);
4801         }
4802
4803         cur_offset = hole_start;
4804         while (1) {
4805                 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
4806                                 block_end - cur_offset, 0);
4807                 if (IS_ERR(em)) {
4808                         err = PTR_ERR(em);
4809                         em = NULL;
4810                         break;
4811                 }
4812                 last_byte = min(extent_map_end(em), block_end);
4813                 last_byte = ALIGN(last_byte, root->sectorsize);
4814                 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
4815                         struct extent_map *hole_em;
4816                         hole_size = last_byte - cur_offset;
4817
4818                         err = maybe_insert_hole(root, inode, cur_offset,
4819                                                 hole_size);
4820                         if (err)
4821                                 break;
4822                         btrfs_drop_extent_cache(inode, cur_offset,
4823                                                 cur_offset + hole_size - 1, 0);
4824                         hole_em = alloc_extent_map();
4825                         if (!hole_em) {
4826                                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4827                                         &BTRFS_I(inode)->runtime_flags);
4828                                 goto next;
4829                         }
4830                         hole_em->start = cur_offset;
4831                         hole_em->len = hole_size;
4832                         hole_em->orig_start = cur_offset;
4833
4834                         hole_em->block_start = EXTENT_MAP_HOLE;
4835                         hole_em->block_len = 0;
4836                         hole_em->orig_block_len = 0;
4837                         hole_em->ram_bytes = hole_size;
4838                         hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
4839                         hole_em->compress_type = BTRFS_COMPRESS_NONE;
4840                         hole_em->generation = root->fs_info->generation;
4841
4842                         while (1) {
4843                                 write_lock(&em_tree->lock);
4844                                 err = add_extent_mapping(em_tree, hole_em, 1);
4845                                 write_unlock(&em_tree->lock);
4846                                 if (err != -EEXIST)
4847                                         break;
4848                                 btrfs_drop_extent_cache(inode, cur_offset,
4849                                                         cur_offset +
4850                                                         hole_size - 1, 0);
4851                         }
4852                         free_extent_map(hole_em);
4853                 }
4854 next:
4855                 free_extent_map(em);
4856                 em = NULL;
4857                 cur_offset = last_byte;
4858                 if (cur_offset >= block_end)
4859                         break;
4860         }
4861         free_extent_map(em);
4862         unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
4863                              GFP_NOFS);
4864         return err;
4865 }
4866
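/*
 * Expanding truncates must not race with snapshot creation (see the
 * comment in btrfs_setsize() below). wait_for_snapshot_creation() loops
 * until btrfs_start_write_no_snapshoting() succeeds, sleeping on the
 * root's will_be_snapshoted atomic between attempts.
 */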
4867 static int wait_snapshoting_atomic_t(atomic_t *a)
4868 {
4869         schedule();
4870         return 0;
4871 }
4872
4873 static void wait_for_snapshot_creation(struct btrfs_root *root)
4874 {
4875         while (true) {
4876                 int ret;
4877
4878                 ret = btrfs_start_write_no_snapshoting(root);
4879                 if (ret)
4880                         break;
4881                 wait_on_atomic_t(&root->will_be_snapshoted,
4882                                  wait_snapshoting_atomic_t,
4883                                  TASK_UNINTERRUPTIBLE);
4884         }
4885 }
4886
4887 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
4888 {
4889         struct btrfs_root *root = BTRFS_I(inode)->root;
4890         struct btrfs_trans_handle *trans;
4891         loff_t oldsize = i_size_read(inode);
4892         loff_t newsize = attr->ia_size;
4893         int mask = attr->ia_valid;
4894         int ret;
4895
4896         /*
4897          * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
4898          * special case where we need to update the times despite not having
4899          * these flags set.  For all other operations the VFS set these flags
4900          * explicitly if it wants a timestamp update.
4901          */
4902         if (newsize != oldsize) {
4903                 inode_inc_iversion(inode);
4904                 if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
4905                         inode->i_ctime = inode->i_mtime =
4906                                 current_fs_time(inode->i_sb);
4907         }
4908
4909         if (newsize > oldsize) {
4910                 truncate_pagecache(inode, newsize);
4911                 /*
4912                  * Don't do an expanding truncate while snapshotting is ongoing.
4913                  * This is to ensure the snapshot captures a fully consistent
4914                  * state of this file - if the snapshot captures this expanding
4915                  * truncation, it must capture all writes that happened before
4916                  * this truncation.
4917                  */
4918                 wait_for_snapshot_creation(root);
4919                 ret = btrfs_cont_expand(inode, oldsize, newsize);
4920                 if (ret) {
4921                         btrfs_end_write_no_snapshoting(root);
4922                         return ret;
4923                 }
4924
4925                 trans = btrfs_start_transaction(root, 1);
4926                 if (IS_ERR(trans)) {
4927                         btrfs_end_write_no_snapshoting(root);
4928                         return PTR_ERR(trans);
4929                 }
4930
4931                 i_size_write(inode, newsize);
4932                 btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
4933                 ret = btrfs_update_inode(trans, root, inode);
4934                 btrfs_end_write_no_snapshoting(root);
4935                 btrfs_end_transaction(trans, root);
4936         } else {
4937
4938                 /*
4939                  * We're truncating a file that used to have good data down to
4940                  * zero. Make sure it gets into the ordered flush list so that
4941                  * any new writes get down to disk quickly.
4942                  */
4943                 if (newsize == 0)
4944                         set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
4945                                 &BTRFS_I(inode)->runtime_flags);
4946
4947                 /*
4948                  * 1 for the orphan item we're going to add
4949                  * 1 for the orphan item deletion.
4950                  */
4951                 trans = btrfs_start_transaction(root, 2);
4952                 if (IS_ERR(trans))
4953                         return PTR_ERR(trans);
4954
4955                 /*
4956                  * We need to do this in case we fail at _any_ point during the
4957                  * actual truncate.  Once we do the truncate_setsize we could
4958                  * invalidate pages which forces any outstanding ordered io to
4959                  * be instantly completed which will give us extents that need
4960                  * to be truncated.  If we fail to get an orphan inode down we
4961                  * could have left over extents that were never meant to live,
4962                  * so we need to guarantee from this point on that everything
4963                  * will be consistent.
4964                  */
4965                 ret = btrfs_orphan_add(trans, inode);
4966                 btrfs_end_transaction(trans, root);
4967                 if (ret)
4968                         return ret;
4969
4970                 /* we don't support swapfiles, so vmtruncate shouldn't fail */
4971                 truncate_setsize(inode, newsize);
4972
4973                 /* Disable nonlocked read DIO to avoid the endless truncate */
4974                 btrfs_inode_block_unlocked_dio(inode);
4975                 inode_dio_wait(inode);
4976                 btrfs_inode_resume_unlocked_dio(inode);
4977
4978                 ret = btrfs_truncate(inode);
4979                 if (ret && inode->i_nlink) {
4980                         int err;
4981
4982                         /*
4983                          * failed to truncate; disk_i_size is only adjusted down
4984                          * as we remove extents, so it should represent the true
4985                          * size of the inode. Reset the in-memory size and
4986                          * delete our orphan entry.
4987                          */
4988                         trans = btrfs_join_transaction(root);
4989                         if (IS_ERR(trans)) {
4990                                 btrfs_orphan_del(NULL, inode);
4991                                 return ret;
4992                         }
4993                         i_size_write(inode, BTRFS_I(inode)->disk_i_size);
4994                         err = btrfs_orphan_del(trans, inode);
4995                         if (err)
4996                                 btrfs_abort_transaction(trans, root, err);
4997                         btrfs_end_transaction(trans, root);
4998                 }
4999         }
5000
5001         return ret;
5002 }
5003
5004 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
5005 {
5006         struct inode *inode = d_inode(dentry);
5007         struct btrfs_root *root = BTRFS_I(inode)->root;
5008         int err;
5009
5010         if (btrfs_root_readonly(root))
5011                 return -EROFS;
5012
5013         err = inode_change_ok(inode, attr);
5014         if (err)
5015                 return err;
5016
5017         if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
5018                 err = btrfs_setsize(inode, attr);
5019                 if (err)
5020                         return err;
5021         }
5022
5023         if (attr->ia_valid) {
5024                 setattr_copy(inode, attr);
5025                 inode_inc_iversion(inode);
5026                 err = btrfs_dirty_inode(inode);
5027
5028                 if (!err && attr->ia_valid & ATTR_MODE)
5029                         err = posix_acl_chmod(inode, inode->i_mode);
5030         }
5031
5032         return err;
5033 }
5034
5035 /*
5036  * While truncating the inode pages during eviction, we get the VFS calling
5037  * btrfs_invalidatepage() against each page of the inode. This is slow because
5038  * the calls to btrfs_invalidatepage() result in a huge amount of calls to
5039  * lock_extent_bits() and clear_extent_bit(), which keep merging and splitting
5040  * extent_state structures over and over, wasting lots of time.
5041  *
5042  * Therefore if the inode is being evicted, let btrfs_invalidatepage() skip all
5043  * those expensive operations on a per-page basis and do only the ordered io
5044  * finishing, while we release the extent_map and extent_state structures here,
5045  * without the excessive merging and splitting.
5046  */
5047 static void evict_inode_truncate_pages(struct inode *inode)
5048 {
5049         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5050         struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree;
5051         struct rb_node *node;
5052
5053         ASSERT(inode->i_state & I_FREEING);
5054         truncate_inode_pages_final(&inode->i_data);
5055
5056         write_lock(&map_tree->lock);
5057         while (!RB_EMPTY_ROOT(&map_tree->map)) {
5058                 struct extent_map *em;
5059
5060                 node = rb_first(&map_tree->map);
5061                 em = rb_entry(node, struct extent_map, rb_node);
5062                 clear_bit(EXTENT_FLAG_PINNED, &em->flags);
5063                 clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
5064                 remove_extent_mapping(map_tree, em);
5065                 free_extent_map(em);
5066                 if (need_resched()) {
5067                         write_unlock(&map_tree->lock);
5068                         cond_resched();
5069                         write_lock(&map_tree->lock);
5070                 }
5071         }
5072         write_unlock(&map_tree->lock);
5073
5074         /*
5075          * Keep looping until we have no more ranges in the io tree.
5076          * We can have ongoing bios started by readpages (called from readahead)
5077          * that have their endio callback (extent_io.c:end_bio_extent_readpage)
5078          * still in progress (they unlocked the pages in the bio but have
5079          * not yet unlocked the ranges in the io tree). This means some
5080          * ranges can still be locked when eviction starts, because before
5081          * those bios were submitted (they are executed by a separate task,
5082          * a work queue kthread), no inode references (inode->i_count) were
5083          * taken (these would be dropped in the end io callback of each
5084          * bio).  Therefore here we effectively end up waiting for those
5085          * bios and for anyone else holding locked ranges without having
5086          * bumped the inode's reference count - if we don't do it, when
5087          * they access the inode's io_tree to unlock a range it may be too
5088          * late, leading to a use-after-free issue.
5089          */
5090         spin_lock(&io_tree->lock);
5091         while (!RB_EMPTY_ROOT(&io_tree->state)) {
5092                 struct extent_state *state;
5093                 struct extent_state *cached_state = NULL;
5094                 u64 start;
5095                 u64 end;
5096
5097                 node = rb_first(&io_tree->state);
5098                 state = rb_entry(node, struct extent_state, rb_node);
5099                 start = state->start;
5100                 end = state->end;
5101                 spin_unlock(&io_tree->lock);
5102
5103                 lock_extent_bits(io_tree, start, end, 0, &cached_state);
5104                 clear_extent_bit(io_tree, start, end,
5105                                  EXTENT_LOCKED | EXTENT_DIRTY |
5106                                  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
5107                                  EXTENT_DEFRAG, 1, 1,
5108                                  &cached_state, GFP_NOFS);
5109
5110                 cond_resched();
5111                 spin_lock(&io_tree->lock);
5112         }
5113         spin_unlock(&io_tree->lock);
5114 }
5115
5116 void btrfs_evict_inode(struct inode *inode)
5117 {
5118         struct btrfs_trans_handle *trans;
5119         struct btrfs_root *root = BTRFS_I(inode)->root;
5120         struct btrfs_block_rsv *rsv, *global_rsv;
5121         int steal_from_global = 0;
5122         u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
5123         int ret;
5124
5125         trace_btrfs_inode_evict(inode);
5126
5127         evict_inode_truncate_pages(inode);
5128
5129         if (inode->i_nlink &&
5130             ((btrfs_root_refs(&root->root_item) != 0 &&
5131               root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
5132              btrfs_is_free_space_inode(inode)))
5133                 goto no_delete;
5134
5135         if (is_bad_inode(inode)) {
5136                 btrfs_orphan_del(NULL, inode);
5137                 goto no_delete;
5138         }
5139         /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
5140         if (!special_file(inode->i_mode))
5141                 btrfs_wait_ordered_range(inode, 0, (u64)-1);
5142
5143         btrfs_free_io_failure_record(inode, 0, (u64)-1);
5144
5145         if (root->fs_info->log_root_recovering) {
5146                 BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
5147                                  &BTRFS_I(inode)->runtime_flags));
5148                 goto no_delete;
5149         }
5150
5151         if (inode->i_nlink > 0) {
5152                 BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
5153                        root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
5154                 goto no_delete;
5155         }
5156
5157         ret = btrfs_commit_inode_delayed_inode(inode);
5158         if (ret) {
5159                 btrfs_orphan_del(NULL, inode);
5160                 goto no_delete;
5161         }
5162
5163         rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
5164         if (!rsv) {
5165                 btrfs_orphan_del(NULL, inode);
5166                 goto no_delete;
5167         }
5168         rsv->size = min_size;
5169         rsv->failfast = 1;
5170         global_rsv = &root->fs_info->global_block_rsv;
5171
5172         btrfs_i_size_write(inode, 0);
5173
5174         /*
5175          * This is a bit simpler than btrfs_truncate since we've already
5176          * reserved our space for our orphan item in the unlink, so we just
5177          * need to reserve some slack space in case we add bytes and update
5178          * inode item when doing the truncate.
5179          */
5180         while (1) {
5181                 ret = btrfs_block_rsv_refill(root, rsv, min_size,
5182                                              BTRFS_RESERVE_FLUSH_LIMIT);
5183
5184                 /*
5185                  * Try and steal from the global reserve since we will
5186                  * likely not use this space anyway; we want to try as
5187                  * hard as possible to get this to work.
5188                  */
5189                 if (ret)
5190                         steal_from_global++;
5191                 else
5192                         steal_from_global = 0;
5193                 ret = 0;
5194
5195                 /*
5196                  * steal_from_global == 0: we reserved stuff, hooray!
5197                  * steal_from_global == 1: we didn't reserve stuff, boo!
5198                  * steal_from_global == 2: we've committed, still not a lot of
5199                  * room but maybe we'll have room in the global reserve this
5200                  * time.
5201                  * steal_from_global == 3: abandon all hope!
5202                  */
5203                 if (steal_from_global > 2) {
5204                         btrfs_warn(root->fs_info,
5205                                 "Could not get space for a delete, will truncate on mount %d",
5206                                 ret);
5207                         btrfs_orphan_del(NULL, inode);
5208                         btrfs_free_block_rsv(root, rsv);
5209                         goto no_delete;
5210                 }
5211
5212                 trans = btrfs_join_transaction(root);
5213                 if (IS_ERR(trans)) {
5214                         btrfs_orphan_del(NULL, inode);
5215                         btrfs_free_block_rsv(root, rsv);
5216                         goto no_delete;
5217                 }
5218
5219                 /*
5220                  * We can't just steal from the global reserve, we need to make
5221                  * sure there is room to do it; if not we need to commit and try
5222                  * again.
5223                  */
5224                 if (steal_from_global) {
5225                         if (!btrfs_check_space_for_delayed_refs(trans, root))
5226                                 ret = btrfs_block_rsv_migrate(global_rsv, rsv,
5227                                                               min_size);
5228                         else
5229                                 ret = -ENOSPC;
5230                 }
5231
5232                 /*
5233                  * Couldn't steal from the global reserve, we have too much
5234                  * pending stuff built up, commit the transaction and try it
5235                  * again.
5236                  */
5237                 if (ret) {
5238                         ret = btrfs_commit_transaction(trans, root);
5239                         if (ret) {
5240                                 btrfs_orphan_del(NULL, inode);
5241                                 btrfs_free_block_rsv(root, rsv);
5242                                 goto no_delete;
5243                         }
5244                         continue;
5245                 } else {
5246                         steal_from_global = 0;
5247                 }
5248
5249                 trans->block_rsv = rsv;
5250
5251                 ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
5252                 if (ret != -ENOSPC && ret != -EAGAIN)
5253                         break;
5254
5255                 trans->block_rsv = &root->fs_info->trans_block_rsv;
5256                 btrfs_end_transaction(trans, root);
5257                 trans = NULL;
5258                 btrfs_btree_balance_dirty(root);
5259         }
5260
5261         btrfs_free_block_rsv(root, rsv);
5262
5263         /*
5264          * Errors here aren't a big deal; they just mean we leave orphan items
5265          * in the tree.  They will be cleaned up on the next mount.
5266          */
5267         if (ret == 0) {
5268                 trans->block_rsv = root->orphan_block_rsv;
5269                 btrfs_orphan_del(trans, inode);
5270         } else {
5271                 btrfs_orphan_del(NULL, inode);
5272         }
5273
5274         trans->block_rsv = &root->fs_info->trans_block_rsv;
5275         if (!(root == root->fs_info->tree_root ||
5276               root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
5277                 btrfs_return_ino(root, btrfs_ino(inode));
5278
5279         btrfs_end_transaction(trans, root);
5280         btrfs_btree_balance_dirty(root);
5281 no_delete:
5282         btrfs_remove_delayed_node(inode);
5283         clear_inode(inode);
5284         return;
5285 }
5286
5287 /*
5288  * this returns the key found in the dir entry in the location pointer.
5289  * If no dir entries were found, location->objectid is 0.
5290  */
5291 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
5292                                struct btrfs_key *location)
5293 {
5294         const char *name = dentry->d_name.name;
5295         int namelen = dentry->d_name.len;
5296         struct btrfs_dir_item *di;
5297         struct btrfs_path *path;
5298         struct btrfs_root *root = BTRFS_I(dir)->root;
5299         int ret = 0;
5300
5301         path = btrfs_alloc_path();
5302         if (!path)
5303                 return -ENOMEM;
5304
5305         di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
5306                                     namelen, 0);
5307         if (IS_ERR(di))
5308                 ret = PTR_ERR(di);
5309
5310         if (IS_ERR_OR_NULL(di))
5311                 goto out_err;
5312
5313         btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
5314 out:
5315         btrfs_free_path(path);
5316         return ret;
5317 out_err:
5318         location->objectid = 0;
5319         goto out;
5320 }
5321
5322 /*
5323  * when we hit a tree root in a directory, the btrfs part of the inode
5324  * needs to be changed to reflect the root directory of the tree root.  This
5325  * is kind of like crossing a mount point.
5326  */
5327 static int fixup_tree_root_location(struct btrfs_root *root,
5328                                     struct inode *dir,
5329                                     struct dentry *dentry,
5330                                     struct btrfs_key *location,
5331                                     struct btrfs_root **sub_root)
5332 {
5333         struct btrfs_path *path;
5334         struct btrfs_root *new_root;
5335         struct btrfs_root_ref *ref;
5336         struct extent_buffer *leaf;
5337         struct btrfs_key key;
5338         int ret;
5339         int err = 0;
5340
5341         path = btrfs_alloc_path();
5342         if (!path) {
5343                 err = -ENOMEM;
5344                 goto out;
5345         }
5346
5347         err = -ENOENT;
5348         key.objectid = BTRFS_I(dir)->root->root_key.objectid;
5349         key.type = BTRFS_ROOT_REF_KEY;
5350         key.offset = location->objectid;
5351
5352         ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, path,
5353                                 0, 0);
5354         if (ret) {
5355                 if (ret < 0)
5356                         err = ret;
5357                 goto out;
5358         }
5359
5360         leaf = path->nodes[0];
5361         ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
5362         if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
5363             btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
5364                 goto out;
5365
5366         ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
5367                                    (unsigned long)(ref + 1),
5368                                    dentry->d_name.len);
5369         if (ret)
5370                 goto out;
5371
5372         btrfs_release_path(path);
5373
5374         new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
5375         if (IS_ERR(new_root)) {
5376                 err = PTR_ERR(new_root);
5377                 goto out;
5378         }
5379
5380         *sub_root = new_root;
5381         location->objectid = btrfs_root_dirid(&new_root->root_item);
5382         location->type = BTRFS_INODE_ITEM_KEY;
5383         location->offset = 0;
5384         err = 0;
5385 out:
5386         btrfs_free_path(path);
5387         return err;
5388 }
5389
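/*
 * Add the inode to the root's red-black tree of in-memory inodes,
 * keyed by inode number.  If an entry with the same ino already
 * exists it must be on its way out (I_WILL_FREE/I_FREEING) and is
 * replaced in place.
 */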
5390 static void inode_tree_add(struct inode *inode)
5391 {
5392         struct btrfs_root *root = BTRFS_I(inode)->root;
5393         struct btrfs_inode *entry;
5394         struct rb_node **p;
5395         struct rb_node *parent;
5396         struct rb_node *new = &BTRFS_I(inode)->rb_node;
5397         u64 ino = btrfs_ino(inode);
5398
5399         if (inode_unhashed(inode))
5400                 return;
5401         parent = NULL;
5402         spin_lock(&root->inode_lock);
5403         p = &root->inode_tree.rb_node;
5404         while (*p) {
5405                 parent = *p;
5406                 entry = rb_entry(parent, struct btrfs_inode, rb_node);
5407
5408                 if (ino < btrfs_ino(&entry->vfs_inode))
5409                         p = &parent->rb_left;
5410                 else if (ino > btrfs_ino(&entry->vfs_inode))
5411                         p = &parent->rb_right;
5412                 else {
5413                         WARN_ON(!(entry->vfs_inode.i_state &
5414                                   (I_WILL_FREE | I_FREEING)));
5415                         rb_replace_node(parent, new, &root->inode_tree);
5416                         RB_CLEAR_NODE(parent);
5417                         spin_unlock(&root->inode_lock);
5418                         return;
5419                 }
5420         }
5421         rb_link_node(new, parent, p);
5422         rb_insert_color(new, &root->inode_tree);
5423         spin_unlock(&root->inode_lock);
5424 }
5425
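/*
 * Remove the inode from the root's red-black tree.  If that leaves the
 * tree empty and the root is no longer referenced, hand the root over
 * to btrfs_add_dead_root() for cleanup.
 */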
5426 static void inode_tree_del(struct inode *inode)
5427 {
5428         struct btrfs_root *root = BTRFS_I(inode)->root;
5429         int empty = 0;
5430
5431         spin_lock(&root->inode_lock);
5432         if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
5433                 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
5434                 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
5435                 empty = RB_EMPTY_ROOT(&root->inode_tree);
5436         }
5437         spin_unlock(&root->inode_lock);
5438
5439         if (empty && btrfs_root_refs(&root->root_item) == 0) {
5440                 synchronize_srcu(&root->fs_info->subvol_srcu);
5441                 spin_lock(&root->inode_lock);
5442                 empty = RB_EMPTY_ROOT(&root->inode_tree);
5443                 spin_unlock(&root->inode_lock);
5444                 if (empty)
5445                         btrfs_add_dead_root(root);
5446         }
5447 }
5448
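/*
 * Walk the root's inode tree in inode-number order, pruning dentry
 * aliases and dropping our reference on each inode so that a root
 * which is going away gets its in-memory inodes evicted.
 */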
5449 void btrfs_invalidate_inodes(struct btrfs_root *root)
5450 {
5451         struct rb_node *node;
5452         struct rb_node *prev;
5453         struct btrfs_inode *entry;
5454         struct inode *inode;
5455         u64 objectid = 0;
5456
5457         if (!test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
5458                 WARN_ON(btrfs_root_refs(&root->root_item) != 0);
5459
5460         spin_lock(&root->inode_lock);
5461 again:
5462         node = root->inode_tree.rb_node;
5463         prev = NULL;
5464         while (node) {
5465                 prev = node;
5466                 entry = rb_entry(node, struct btrfs_inode, rb_node);
5467
5468                 if (objectid < btrfs_ino(&entry->vfs_inode))
5469                         node = node->rb_left;
5470                 else if (objectid > btrfs_ino(&entry->vfs_inode))
5471                         node = node->rb_right;
5472                 else
5473                         break;
5474         }
5475         if (!node) {
5476                 while (prev) {
5477                         entry = rb_entry(prev, struct btrfs_inode, rb_node);
5478                         if (objectid <= btrfs_ino(&entry->vfs_inode)) {
5479                                 node = prev;
5480                                 break;
5481                         }
5482                         prev = rb_next(prev);
5483                 }
5484         }
5485         while (node) {
5486                 entry = rb_entry(node, struct btrfs_inode, rb_node);
5487                 objectid = btrfs_ino(&entry->vfs_inode) + 1;
5488                 inode = igrab(&entry->vfs_inode);
5489                 if (inode) {
5490                         spin_unlock(&root->inode_lock);
5491                         if (atomic_read(&inode->i_count) > 1)
5492                                 d_prune_aliases(inode);
5493                         /*
5494                          * btrfs_drop_inode will have it removed from
5495                          * the inode cache when its usage count
5496                          * hits zero.
5497                          */
5498                         iput(inode);
5499                         cond_resched();
5500                         spin_lock(&root->inode_lock);
5501                         goto again;
5502                 }
5503
5504                 if (cond_resched_lock(&root->inode_lock))
5505                         goto again;
5506
5507                 node = rb_next(node);
5508         }
5509         spin_unlock(&root->inode_lock);
5510 }
5511
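/* iget5_locked() callback: initialize a freshly allocated inode */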
5512 static int btrfs_init_locked_inode(struct inode *inode, void *p)
5513 {
5514         struct btrfs_iget_args *args = p;
5515         inode->i_ino = args->location->objectid;
5516         memcpy(&BTRFS_I(inode)->location, args->location,
5517                sizeof(*args->location));
5518         BTRFS_I(inode)->root = args->root;
5519         return 0;
5520 }
5521
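/* iget5_locked() callback: match an inode by objectid and root */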
5522 static int btrfs_find_actor(struct inode *inode, void *opaque)
5523 {
5524         struct btrfs_iget_args *args = opaque;
5525         return args->location->objectid == BTRFS_I(inode)->location.objectid &&
5526                 args->root == BTRFS_I(inode)->root;
5527 }
5528
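/* Look up or allocate a (possibly I_NEW) VFS inode for the given key and root */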
5529 static struct inode *btrfs_iget_locked(struct super_block *s,
5530                                        struct btrfs_key *location,
5531                                        struct btrfs_root *root)
5532 {
5533         struct inode *inode;
5534         struct btrfs_iget_args args;
5535         unsigned long hashval = btrfs_inode_hash(location->objectid, root);
5536
5537         args.location = location;
5538         args.root = root;
5539
5540         inode = iget5_locked(s, hashval, btrfs_find_actor,
5541                              btrfs_init_locked_inode,
5542                              (void *)&args);
5543         return inode;
5544 }
5545
5546 /* Get an inode object given its location and corresponding root.
5547  * Returns in *new if the inode was read from disk.
5548  */
5549 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
5550                          struct btrfs_root *root, int *new)
5551 {
5552         struct inode *inode;
5553
5554         inode = btrfs_iget_locked(s, location, root);
5555         if (!inode)
5556                 return ERR_PTR(-ENOMEM);
5557
5558         if (inode->i_state & I_NEW) {
5559                 btrfs_read_locked_inode(inode);
5560                 if (!is_bad_inode(inode)) {
5561                         inode_tree_add(inode);
5562                         unlock_new_inode(inode);
5563                         if (new)
5564                                 *new = 1;
5565                 } else {
5566                         unlock_new_inode(inode);
5567                         iput(inode);
5568                         inode = ERR_PTR(-ESTALE);
5569                 }
5570         }
5571
5572         return inode;
5573 }
5574
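/*
 * Build an in-memory, read-only stand-in directory inode.  This is
 * what a lookup lands on when a subvolume reference points at a root
 * that no longer exists (see the -ENOENT case in btrfs_lookup_dentry).
 */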
5575 static struct inode *new_simple_dir(struct super_block *s,
5576                                     struct btrfs_key *key,
5577                                     struct btrfs_root *root)
5578 {
5579         struct inode *inode = new_inode(s);
5580
5581         if (!inode)
5582                 return ERR_PTR(-ENOMEM);
5583
5584         BTRFS_I(inode)->root = root;
5585         memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
5586         set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
5587
5588         inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
5589         inode->i_op = &btrfs_dir_ro_inode_operations;
5590         inode->i_fop = &simple_dir_operations;
5591         inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
5592         inode->i_mtime = CURRENT_TIME;
5593         inode->i_atime = inode->i_mtime;
5594         inode->i_ctime = inode->i_mtime;
5595         BTRFS_I(inode)->i_otime = inode->i_mtime;
5596
5597         return inode;
5598 }
5599
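/*
 * Resolve a directory entry to an inode, crossing into the subvolume
 * root when the dir item carries a ROOT_ITEM key instead of an
 * INODE_ITEM key.
 */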
5600 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
5601 {
5602         struct inode *inode;
5603         struct btrfs_root *root = BTRFS_I(dir)->root;
5604         struct btrfs_root *sub_root = root;
5605         struct btrfs_key location;
5606         int index;
5607         int ret = 0;
5608
5609         if (dentry->d_name.len > BTRFS_NAME_LEN)
5610                 return ERR_PTR(-ENAMETOOLONG);
5611
5612         ret = btrfs_inode_by_name(dir, dentry, &location);
5613         if (ret < 0)
5614                 return ERR_PTR(ret);
5615
5616         if (location.objectid == 0)
5617                 return ERR_PTR(-ENOENT);
5618
5619         if (location.type == BTRFS_INODE_ITEM_KEY) {
5620                 inode = btrfs_iget(dir->i_sb, &location, root, NULL);
5621                 return inode;
5622         }
5623
5624         BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
5625
5626         index = srcu_read_lock(&root->fs_info->subvol_srcu);
5627         ret = fixup_tree_root_location(root, dir, dentry,
5628                                        &location, &sub_root);
5629         if (ret < 0) {
5630                 if (ret != -ENOENT)
5631                         inode = ERR_PTR(ret);
5632                 else
5633                         inode = new_simple_dir(dir->i_sb, &location, sub_root);
5634         } else {
5635                 inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
5636         }
5637         srcu_read_unlock(&root->fs_info->subvol_srcu, index);
5638
5639         if (!IS_ERR(inode) && root != sub_root) {
5640                 down_read(&root->fs_info->cleanup_work_sem);
5641                 if (!(inode->i_sb->s_flags & MS_RDONLY))
5642                         ret = btrfs_orphan_cleanup(sub_root);
5643                 up_read(&root->fs_info->cleanup_work_sem);
5644                 if (ret) {
5645                         iput(inode);
5646                         inode = ERR_PTR(ret);
5647                 }
5648         }
5649
5650         return inode;
5651 }
5652
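/*
 * Tell the dcache to discard dentries that point into a deleted
 * subvolume or at the dummy empty-subvolume directory.
 */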
5653 static int btrfs_dentry_delete(const struct dentry *dentry)
5654 {
5655         struct btrfs_root *root;
5656         struct inode *inode = d_inode(dentry);
5657
5658         if (!inode && !IS_ROOT(dentry))
5659                 inode = d_inode(dentry->d_parent);
5660
5661         if (inode) {
5662                 root = BTRFS_I(inode)->root;
5663                 if (btrfs_root_refs(&root->root_item) == 0)
5664                         return 1;
5665
5666                 if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5667                         return 1;
5668         }
5669         return 0;
5670 }
5671
5672 static void btrfs_dentry_release(struct dentry *dentry)
5673 {
5674         kfree(dentry->d_fsdata);
5675 }
5676
5677 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
5678                                    unsigned int flags)
5679 {
5680         struct inode *inode;
5681
5682         inode = btrfs_lookup_dentry(dir, dentry);
5683         if (IS_ERR(inode)) {
5684                 if (PTR_ERR(inode) == -ENOENT)
5685                         inode = NULL;
5686                 else
5687                         return ERR_CAST(inode);
5688         }
5689
5690         return d_splice_alias(inode, dentry);
5691 }
5692
5693 unsigned char btrfs_filetype_table[] = {
5694         DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
5695 };
5696
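/*
 * readdir: walk the DIR_INDEX items (DIR_ITEM for the tree root)
 * starting at ctx->pos, merging in delayed index insertions and
 * skipping delayed deletions that haven't reached the btree yet.
 */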
5697 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
5698 {
5699         struct inode *inode = file_inode(file);
5700         struct btrfs_root *root = BTRFS_I(inode)->root;
5701         struct btrfs_item *item;
5702         struct btrfs_dir_item *di;
5703         struct btrfs_key key;
5704         struct btrfs_key found_key;
5705         struct btrfs_path *path;
5706         struct list_head ins_list;
5707         struct list_head del_list;
5708         int ret;
5709         struct extent_buffer *leaf;
5710         int slot;
5711         unsigned char d_type;
5712         int over = 0;
5713         u32 di_cur;
5714         u32 di_total;
5715         u32 di_len;
5716         int key_type = BTRFS_DIR_INDEX_KEY;
5717         char tmp_name[32];
5718         char *name_ptr;
5719         int name_len;
5720         int is_curr = 0;        /* ctx->pos points to the current index? */
5721
5722         /* FIXME, use a real flag for deciding about the key type */
5723         if (root->fs_info->tree_root == root)
5724                 key_type = BTRFS_DIR_ITEM_KEY;
5725
5726         if (!dir_emit_dots(file, ctx))
5727                 return 0;
5728
5729         path = btrfs_alloc_path();
5730         if (!path)
5731                 return -ENOMEM;
5732
5733         path->reada = 1;
5734
5735         if (key_type == BTRFS_DIR_INDEX_KEY) {
5736                 INIT_LIST_HEAD(&ins_list);
5737                 INIT_LIST_HEAD(&del_list);
5738                 btrfs_get_delayed_items(inode, &ins_list, &del_list);
5739         }
5740
5741         key.type = key_type;
5742         key.offset = ctx->pos;
5743         key.objectid = btrfs_ino(inode);
5744
5745         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5746         if (ret < 0)
5747                 goto err;
5748
5749         while (1) {
5750                 leaf = path->nodes[0];
5751                 slot = path->slots[0];
5752                 if (slot >= btrfs_header_nritems(leaf)) {
5753                         ret = btrfs_next_leaf(root, path);
5754                         if (ret < 0)
5755                                 goto err;
5756                         else if (ret > 0)
5757                                 break;
5758                         continue;
5759                 }
5760
5761                 item = btrfs_item_nr(slot);
5762                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5763
5764                 if (found_key.objectid != key.objectid)
5765                         break;
5766                 if (found_key.type != key_type)
5767                         break;
5768                 if (found_key.offset < ctx->pos)
5769                         goto next;
5770                 if (key_type == BTRFS_DIR_INDEX_KEY &&
5771                     btrfs_should_delete_dir_index(&del_list,
5772                                                   found_key.offset))
5773                         goto next;
5774
5775                 ctx->pos = found_key.offset;
5776                 is_curr = 1;
5777
5778                 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
5779                 di_cur = 0;
5780                 di_total = btrfs_item_size(leaf, item);
5781
5782                 while (di_cur < di_total) {
5783                         struct btrfs_key location;
5784
5785                         if (verify_dir_item(root, leaf, di))
5786                                 break;
5787
5788                         name_len = btrfs_dir_name_len(leaf, di);
5789                         if (name_len <= sizeof(tmp_name)) {
5790                                 name_ptr = tmp_name;
5791                         } else {
5792                                 name_ptr = kmalloc(name_len, GFP_NOFS);
5793                                 if (!name_ptr) {
5794                                         ret = -ENOMEM;
5795                                         goto err;
5796                                 }
5797                         }
5798                         read_extent_buffer(leaf, name_ptr,
5799                                            (unsigned long)(di + 1), name_len);
5800
5801                         d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
5802                         btrfs_dir_item_key_to_cpu(leaf, di, &location);
5803
5804
5805                         /* is this a reference to our own snapshot? If so
5806                          * skip it.
5807                          *
5808                          * In contrast to old kernels, we insert the snapshot's
5809                          * dir item and dir index after it has been created, so
5810                          * we won't find a reference to our own snapshot. We
5811                          * still keep the following code for backward
5812                          * compatibility.
5813                          */
5814                         if (location.type == BTRFS_ROOT_ITEM_KEY &&
5815                             location.objectid == root->root_key.objectid) {
5816                                 over = 0;
5817                                 goto skip;
5818                         }
5819                         over = !dir_emit(ctx, name_ptr, name_len,
5820                                        location.objectid, d_type);
5821
5822 skip:
5823                         if (name_ptr != tmp_name)
5824                                 kfree(name_ptr);
5825
5826                         if (over)
5827                                 goto nopos;
5828                         di_len = btrfs_dir_name_len(leaf, di) +
5829                                  btrfs_dir_data_len(leaf, di) + sizeof(*di);
5830                         di_cur += di_len;
5831                         di = (struct btrfs_dir_item *)((char *)di + di_len);
5832                 }
5833 next:
5834                 path->slots[0]++;
5835         }
5836
5837         if (key_type == BTRFS_DIR_INDEX_KEY) {
5838                 if (is_curr)
5839                         ctx->pos++;
5840                 ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
5841                 if (ret)
5842                         goto nopos;
5843         }
5844
5845         /* Reached end of directory/root. Bump pos past the last item. */
5846         ctx->pos++;
5847
5848         /*
5849          * Stop new entries from being returned after we return the last
5850          * entry.
5851          *
5852          * New directory entries are assigned a strictly increasing
5853          * offset.  This means that new entries created during readdir
5854          * are *guaranteed* to be seen in the future by that readdir.
5855          * This has broken buggy programs which operate on names as
5856          * they're returned by readdir.  Until we re-use freed offsets
5857          * we have this hack to stop new entries from being returned
5858          * under the assumption that they'll never reach this huge
5859          * offset.
5860          *
5861          * This is being careful not to overflow 32bit loff_t unless the
5862          * last entry requires it because doing so has broken 32bit apps
5863          * in the past.
5864          */
5865         if (key_type == BTRFS_DIR_INDEX_KEY) {
5866                 if (ctx->pos >= INT_MAX)
5867                         ctx->pos = LLONG_MAX;
5868                 else
5869                         ctx->pos = INT_MAX;
5870         }
5871 nopos:
5872         ret = 0;
5873 err:
5874         if (key_type == BTRFS_DIR_INDEX_KEY)
5875                 btrfs_put_delayed_items(&ins_list, &del_list);
5876         btrfs_free_path(path);
5877         return ret;
5878 }
5879
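/*
 * ->write_inode: inode updates are tracked through transactions, so
 * for WB_SYNC_ALL writeback we commit the running transaction rather
 * than writing the inode item directly.
 */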
5880 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
5881 {
5882         struct btrfs_root *root = BTRFS_I(inode)->root;
5883         struct btrfs_trans_handle *trans;
5884         int ret = 0;
5885         bool nolock = false;
5886
5887         if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
5888                 return 0;
5889
5890         if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(inode))
5891                 nolock = true;
5892
5893         if (wbc->sync_mode == WB_SYNC_ALL) {
5894                 if (nolock)
5895                         trans = btrfs_join_transaction_nolock(root);
5896                 else
5897                         trans = btrfs_join_transaction(root);
5898                 if (IS_ERR(trans))
5899                         return PTR_ERR(trans);
5900                 ret = btrfs_commit_transaction(trans, root);
5901         }
5902         return ret;
5903 }
5904
5905 /*
5906  * This is somewhat expensive, updating the tree every time the
5907  * inode changes.  But, it is most likely to find the inode in cache.
5908  * FIXME, needs more benchmarking...there are no reasons other than performance
5909  * to keep or drop this code.
5910  */
5911 static int btrfs_dirty_inode(struct inode *inode)
5912 {
5913         struct btrfs_root *root = BTRFS_I(inode)->root;
5914         struct btrfs_trans_handle *trans;
5915         int ret;
5916
5917         if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
5918                 return 0;
5919
5920         trans = btrfs_join_transaction(root);
5921         if (IS_ERR(trans))
5922                 return PTR_ERR(trans);
5923
5924         ret = btrfs_update_inode(trans, root, inode);
5925         if (ret == -ENOSPC) {
5926                 /* whoops, let's try again with the full transaction */
5927                 btrfs_end_transaction(trans, root);
5928                 trans = btrfs_start_transaction(root, 1);
5929                 if (IS_ERR(trans))
5930                         return PTR_ERR(trans);
5931
5932                 ret = btrfs_update_inode(trans, root, inode);
5933         }
5934         btrfs_end_transaction(trans, root);
5935         if (BTRFS_I(inode)->delayed_node)
5936                 btrfs_balance_delayed_items(root);
5937
5938         return ret;
5939 }
5940
5941 /*
5942  * This is a copy of file_update_time.  We need it so we can return an error
5943  * on ENOSPC when updating the inode during file writes and mmap writes.
5944  */
5945 static int btrfs_update_time(struct inode *inode, struct timespec *now,
5946                              int flags)
5947 {
5948         struct btrfs_root *root = BTRFS_I(inode)->root;
5949
5950         if (btrfs_root_readonly(root))
5951                 return -EROFS;
5952
5953         if (flags & S_VERSION)
5954                 inode_inc_iversion(inode);
5955         if (flags & S_CTIME)
5956                 inode->i_ctime = *now;
5957         if (flags & S_MTIME)
5958                 inode->i_mtime = *now;
5959         if (flags & S_ATIME)
5960                 inode->i_atime = *now;
5961         return btrfs_dirty_inode(inode);
5962 }
5963
5964 /*
5965  * find the highest existing sequence number in a directory
5966  * and then set the in-memory index_cnt variable to the next
5967  * free sequence number
5968  */
5969 static int btrfs_set_inode_index_count(struct inode *inode)
5970 {
5971         struct btrfs_root *root = BTRFS_I(inode)->root;
5972         struct btrfs_key key, found_key;
5973         struct btrfs_path *path;
5974         struct extent_buffer *leaf;
5975         int ret;
5976
5977         key.objectid = btrfs_ino(inode);
5978         key.type = BTRFS_DIR_INDEX_KEY;
5979         key.offset = (u64)-1;
5980
5981         path = btrfs_alloc_path();
5982         if (!path)
5983                 return -ENOMEM;
5984
5985         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5986         if (ret < 0)
5987                 goto out;
5988         /* FIXME: we should be able to handle this */
5989         if (ret == 0)
5990                 goto out;
5991         ret = 0;
5992
5993         /*
5994          * MAGIC NUMBER EXPLANATION:
5995          * since we search a directory based on f_pos, and '.' and '..'
5996          * have f_pos of 0 and 1 respectively, every other entry has to
5997          * start at 2
5998          */
5999         if (path->slots[0] == 0) {
6000                 BTRFS_I(inode)->index_cnt = 2;
6001                 goto out;
6002         }
6003
6004         path->slots[0]--;
6005
6006         leaf = path->nodes[0];
6007         btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6008
6009         if (found_key.objectid != btrfs_ino(inode) ||
6010             found_key.type != BTRFS_DIR_INDEX_KEY) {
6011                 BTRFS_I(inode)->index_cnt = 2;
6012                 goto out;
6013         }
6014
6015         BTRFS_I(inode)->index_cnt = found_key.offset + 1;
6016 out:
6017         btrfs_free_path(path);
6018         return ret;
6019 }
6020
6021 /*
6022  * helper to find a free sequence number in a given directory.  The current
6023  * code is very simple; later versions will do smarter things in the btree
6024  */
6025 int btrfs_set_inode_index(struct inode *dir, u64 *index)
6026 {
6027         int ret = 0;
6028
6029         if (BTRFS_I(dir)->index_cnt == (u64)-1) {
6030                 ret = btrfs_inode_delayed_dir_index_count(dir);
6031                 if (ret) {
6032                         ret = btrfs_set_inode_index_count(dir);
6033                         if (ret)
6034                                 return ret;
6035                 }
6036         }
6037
6038         *index = BTRFS_I(dir)->index_cnt;
6039         BTRFS_I(dir)->index_cnt++;
6040
6041         return ret;
6042 }
6043
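/* Hash the new inode into the VFS inode cache, locked and marked I_NEW */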
6044 static int btrfs_insert_inode_locked(struct inode *inode)
6045 {
6046         struct btrfs_iget_args args;
6047         args.location = &BTRFS_I(inode)->location;
6048         args.root = BTRFS_I(inode)->root;
6049
6050         return insert_inode_locked4(inode,
6051                    btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
6052                    btrfs_find_actor, &args);
6053 }
6054
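/*
 * Allocate a new in-core inode and insert its inode item (plus, when a
 * name is given, the first inode ref) into the btree in one batch.  A
 * NULL name means an O_TMPFILE style inode that starts with zero links.
 */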
6055 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
6056                                      struct btrfs_root *root,
6057                                      struct inode *dir,
6058                                      const char *name, int name_len,
6059                                      u64 ref_objectid, u64 objectid,
6060                                      umode_t mode, u64 *index)
6061 {
6062         struct inode *inode;
6063         struct btrfs_inode_item *inode_item;
6064         struct btrfs_key *location;
6065         struct btrfs_path *path;
6066         struct btrfs_inode_ref *ref;
6067         struct btrfs_key key[2];
6068         u32 sizes[2];
6069         int nitems = name ? 2 : 1;
6070         unsigned long ptr;
6071         int ret;
6072
6073         path = btrfs_alloc_path();
6074         if (!path)
6075                 return ERR_PTR(-ENOMEM);
6076
6077         inode = new_inode(root->fs_info->sb);
6078         if (!inode) {
6079                 btrfs_free_path(path);
6080                 return ERR_PTR(-ENOMEM);
6081         }
6082
6083         /*
6084          * O_TMPFILE (NULL name): set the link count to 0 so that the
6085          * inode item we fill in below carries the correct link count.
6086          */
6087         if (!name)
6088                 set_nlink(inode, 0);
6089
6090         /*
6091          * we have to initialize this early, so we can reclaim the inode
6092          * number if we fail afterwards in this function.
6093          */
6094         inode->i_ino = objectid;
6095
6096         if (dir && name) {
6097                 trace_btrfs_inode_request(dir);
6098
6099                 ret = btrfs_set_inode_index(dir, index);
6100                 if (ret) {
6101                         btrfs_free_path(path);
6102                         iput(inode);
6103                         return ERR_PTR(ret);
6104                 }
6105         } else if (dir) {
6106                 *index = 0;
6107         }
6108         /*
6109          * index_cnt is ignored for everything but a dir,
6110          * btrfs_set_inode_index_count has an explanation for the magic
6111          * number
6112          */
6113         BTRFS_I(inode)->index_cnt = 2;
6114         BTRFS_I(inode)->dir_index = *index;
6115         BTRFS_I(inode)->root = root;
6116         BTRFS_I(inode)->generation = trans->transid;
6117         inode->i_generation = BTRFS_I(inode)->generation;
6118
6119         /*
6120          * We could have gotten an inode number from somebody who was fsynced
6121          * and then removed in this same transaction, so let's just set full
6122          * sync since it will be a full sync anyway and this will blow away the
6123          * old info in the log.
6124          */
6125         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
6126
6127         key[0].objectid = objectid;
6128         key[0].type = BTRFS_INODE_ITEM_KEY;
6129         key[0].offset = 0;
6130
6131         sizes[0] = sizeof(struct btrfs_inode_item);
6132
6133         if (name) {
6134                 /*
6135                  * Start new inodes with an inode_ref. This is slightly more
6136                  * efficient for small numbers of hard links since they will
6137                  * be packed into one item. Extended refs will kick in if we
6138                  * add more hard links than can fit in the ref item.
6139                  */
6140                 key[1].objectid = objectid;
6141                 key[1].type = BTRFS_INODE_REF_KEY;
6142                 key[1].offset = ref_objectid;
6143
6144                 sizes[1] = name_len + sizeof(*ref);
6145         }
6146
6147         location = &BTRFS_I(inode)->location;
6148         location->objectid = objectid;
6149         location->offset = 0;
6150         location->type = BTRFS_INODE_ITEM_KEY;
6151
6152         ret = btrfs_insert_inode_locked(inode);
6153         if (ret < 0)
6154                 goto fail;
6155
6156         path->leave_spinning = 1;
6157         ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems);
6158         if (ret != 0)
6159                 goto fail_unlock;
6160
6161         inode_init_owner(inode, dir, mode);
6162         inode_set_bytes(inode, 0);
6163
6164         inode->i_mtime = CURRENT_TIME;
6165         inode->i_atime = inode->i_mtime;
6166         inode->i_ctime = inode->i_mtime;
6167         BTRFS_I(inode)->i_otime = inode->i_mtime;
6168
6169         inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
6170                                   struct btrfs_inode_item);
6171         memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item,
6172                              sizeof(*inode_item));
6173         fill_inode_item(trans, path->nodes[0], inode_item, inode);
6174
6175         if (name) {
6176                 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
6177                                      struct btrfs_inode_ref);
6178                 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
6179                 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
6180                 ptr = (unsigned long)(ref + 1);
6181                 write_extent_buffer(path->nodes[0], name, ptr, name_len);
6182         }
6183
6184         btrfs_mark_buffer_dirty(path->nodes[0]);
6185         btrfs_free_path(path);
6186
6187         btrfs_inherit_iflags(inode, dir);
6188
6189         if (S_ISREG(mode)) {
6190                 if (btrfs_test_opt(root, NODATASUM))
6191                         BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6192                 if (btrfs_test_opt(root, NODATACOW))
6193                         BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
6194                                 BTRFS_INODE_NODATASUM;
6195         }
6196
6197         inode_tree_add(inode);
6198
6199         trace_btrfs_inode_new(inode);
6200         btrfs_set_inode_last_trans(trans, inode);
6201
6202         btrfs_update_root_times(trans, root);
6203
6204         ret = btrfs_inode_inherit_props(trans, inode, dir);
6205         if (ret)
6206                 btrfs_err(root->fs_info,
6207                           "error inheriting props for ino %llu (root %llu): %d",
6208                           btrfs_ino(inode), root->root_key.objectid, ret);
6209
6210         return inode;
6211
6212 fail_unlock:
6213         unlock_new_inode(inode);
6214 fail:
6215         if (dir && name)
6216                 BTRFS_I(dir)->index_cnt--;
6217         btrfs_free_path(path);
6218         iput(inode);
6219         return ERR_PTR(ret);
6220 }
6221
6222 static inline u8 btrfs_inode_type(struct inode *inode)
6223 {
6224         return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
6225 }
6226
6227 /*
6228  * utility function to add 'inode' into 'parent_inode' with
6229  * a given name and a given sequence number.
6230  * if 'add_backref' is true, also insert a backref from the
6231  * inode to the parent directory.
6232  */
6233 int btrfs_add_link(struct btrfs_trans_handle *trans,
6234                    struct inode *parent_inode, struct inode *inode,
6235                    const char *name, int name_len, int add_backref, u64 index)
6236 {
6237         int ret = 0;
6238         struct btrfs_key key;
6239         struct btrfs_root *root = BTRFS_I(parent_inode)->root;
6240         u64 ino = btrfs_ino(inode);
6241         u64 parent_ino = btrfs_ino(parent_inode);
6242
6243         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6244                 memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
6245         } else {
6246                 key.objectid = ino;
6247                 key.type = BTRFS_INODE_ITEM_KEY;
6248                 key.offset = 0;
6249         }
6250
6251         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6252                 ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
6253                                          key.objectid, root->root_key.objectid,
6254                                          parent_ino, index, name, name_len);
6255         } else if (add_backref) {
6256                 ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
6257                                              parent_ino, index);
6258         }
6259
6260         /* Nothing to clean up yet */
6261         if (ret)
6262                 return ret;
6263
6264         ret = btrfs_insert_dir_item(trans, root, name, name_len,
6265                                     parent_inode, &key,
6266                                     btrfs_inode_type(inode), index);
6267         if (ret == -EEXIST || ret == -EOVERFLOW)
6268                 goto fail_dir_item;
6269         else if (ret) {
6270                 btrfs_abort_transaction(trans, root, ret);
6271                 return ret;
6272         }
6273
6274         btrfs_i_size_write(parent_inode, parent_inode->i_size +
6275                            name_len * 2);
6276         inode_inc_iversion(parent_inode);
6277         parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
6278         ret = btrfs_update_inode(trans, root, parent_inode);
6279         if (ret)
6280                 btrfs_abort_transaction(trans, root, ret);
6281         return ret;
6282
6283 fail_dir_item:
6284         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6285                 u64 local_index;
6286                 int err;
6287                 err = btrfs_del_root_ref(trans, root->fs_info->tree_root,
6288                                  key.objectid, root->root_key.objectid,
6289                                  parent_ino, &local_index, name, name_len);
6290
6291         } else if (add_backref) {
6292                 u64 local_index;
6293                 int err;
6294
6295                 err = btrfs_del_inode_ref(trans, root, name, name_len,
6296                                           ino, parent_ino, &local_index);
6297         }
6298         return ret;
6299 }
6300
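/*
 * Wrapper around btrfs_add_link() for non-directories that maps a
 * positive return value to -EEXIST.
 */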
6301 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
6302                             struct inode *dir, struct dentry *dentry,
6303                             struct inode *inode, int backref, u64 index)
6304 {
6305         int err = btrfs_add_link(trans, dir, inode,
6306                                  dentry->d_name.name, dentry->d_name.len,
6307                                  backref, index);
6308         if (err > 0)
6309                 err = -EEXIST;
6310         return err;
6311 }
6312
6313 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
6314                         umode_t mode, dev_t rdev)
6315 {
6316         struct btrfs_trans_handle *trans;
6317         struct btrfs_root *root = BTRFS_I(dir)->root;
6318         struct inode *inode = NULL;
6319         int err;
6320         int drop_inode = 0;
6321         u64 objectid;
6322         u64 index = 0;
6323
6324         if (!new_valid_dev(rdev))
6325                 return -EINVAL;
6326
6327         /*
6328          * 2 for inode item and ref
6329          * 2 for dir items
6330          * 1 for xattr if selinux is on
6331          */
6332         trans = btrfs_start_transaction(root, 5);
6333         if (IS_ERR(trans))
6334                 return PTR_ERR(trans);
6335
6336         err = btrfs_find_free_ino(root, &objectid);
6337         if (err)
6338                 goto out_unlock;
6339
6340         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6341                                 dentry->d_name.len, btrfs_ino(dir), objectid,
6342                                 mode, &index);
6343         if (IS_ERR(inode)) {
6344                 err = PTR_ERR(inode);
6345                 goto out_unlock;
6346         }
6347
6348         /*
6349          * If the active LSM wants to access the inode during
6350          * d_instantiate it needs these. Smack checks to see
6351          * if the filesystem supports xattrs by looking at the
6352          * ops vector.
6353          */
6354         inode->i_op = &btrfs_special_inode_operations;
6355         init_special_inode(inode, inode->i_mode, rdev);
6356
6357         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6358         if (err)
6359                 goto out_unlock_inode;
6360
6361         err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
6362         if (err) {
6363                 goto out_unlock_inode;
6364         } else {
6365                 btrfs_update_inode(trans, root, inode);
6366                 unlock_new_inode(inode);
6367                 d_instantiate(dentry, inode);
6368         }
6369
6370 out_unlock:
6371         btrfs_end_transaction(trans, root);
6372         btrfs_balance_delayed_items(root);
6373         btrfs_btree_balance_dirty(root);
6374         if (drop_inode) {
6375                 inode_dec_link_count(inode);
6376                 iput(inode);
6377         }
6378         return err;
6379
6380 out_unlock_inode:
6381         drop_inode = 1;
6382         unlock_new_inode(inode);
6383         goto out_unlock;
6384
6385 }
6386
6387 static int btrfs_create(struct inode *dir, struct dentry *dentry,
6388                         umode_t mode, bool excl)
6389 {
6390         struct btrfs_trans_handle *trans;
6391         struct btrfs_root *root = BTRFS_I(dir)->root;
6392         struct inode *inode = NULL;
6393         int drop_inode_on_err = 0;
6394         int err;
6395         u64 objectid;
6396         u64 index = 0;
6397
6398         /*
6399          * 2 for inode item and ref
6400          * 2 for dir items
6401          * 1 for xattr if selinux is on
6402          */
6403         trans = btrfs_start_transaction(root, 5);
6404         if (IS_ERR(trans))
6405                 return PTR_ERR(trans);
6406
6407         err = btrfs_find_free_ino(root, &objectid);
6408         if (err)
6409                 goto out_unlock;
6410
6411         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6412                                 dentry->d_name.len, btrfs_ino(dir), objectid,
6413                                 mode, &index);
6414         if (IS_ERR(inode)) {
6415                 err = PTR_ERR(inode);
6416                 goto out_unlock;
6417         }
6418         drop_inode_on_err = 1;
6419         /*
6420          * If the active LSM wants to access the inode during
6421          * d_instantiate it needs these. Smack checks to see
6422          * if the filesystem supports xattrs by looking at the
6423          * ops vector.
6424          */
6425         inode->i_fop = &btrfs_file_operations;
6426         inode->i_op = &btrfs_file_inode_operations;
6427         inode->i_mapping->a_ops = &btrfs_aops;
6428
6429         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6430         if (err)
6431                 goto out_unlock_inode;
6432
6433         err = btrfs_update_inode(trans, root, inode);
6434         if (err)
6435                 goto out_unlock_inode;
6436
6437         err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
6438         if (err)
6439                 goto out_unlock_inode;
6440
6441         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
6442         unlock_new_inode(inode);
6443         d_instantiate(dentry, inode);
6444
6445 out_unlock:
6446         btrfs_end_transaction(trans, root);
6447         if (err && drop_inode_on_err) {
6448                 inode_dec_link_count(inode);
6449                 iput(inode);
6450         }
6451         btrfs_balance_delayed_items(root);
6452         btrfs_btree_balance_dirty(root);
6453         return err;
6454
6455 out_unlock_inode:
6456         unlock_new_inode(inode);
6457         goto out_unlock;
6458
6459 }
6460
6461 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
6462                       struct dentry *dentry)
6463 {
6464         struct btrfs_trans_handle *trans;
6465         struct btrfs_root *root = BTRFS_I(dir)->root;
6466         struct inode *inode = d_inode(old_dentry);
6467         u64 index;
6468         int err;
6469         int drop_inode = 0;
6470
6471         /* do not allow hard links between subvolumes of the same device */
6472         if (root->objectid != BTRFS_I(inode)->root->objectid)
6473                 return -EXDEV;
6474
6475         if (inode->i_nlink >= BTRFS_LINK_MAX)
6476                 return -EMLINK;
6477
6478         err = btrfs_set_inode_index(dir, &index);
6479         if (err)
6480                 goto fail;
6481
6482         /*
6483          * 2 items for inode and inode ref
6484          * 2 items for dir items
6485          * 1 item for parent inode
6486          */
6487         trans = btrfs_start_transaction(root, 5);
6488         if (IS_ERR(trans)) {
6489                 err = PTR_ERR(trans);
6490                 goto fail;
6491         }
6492
6493         /* There are several dir indexes for this inode, clear the cache. */
6494         BTRFS_I(inode)->dir_index = 0ULL;
6495         inc_nlink(inode);
6496         inode_inc_iversion(inode);
6497         inode->i_ctime = CURRENT_TIME;
6498         ihold(inode);
6499         set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
6500
6501         err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
6502
6503         if (err) {
6504                 drop_inode = 1;
6505         } else {
6506                 struct dentry *parent = dentry->d_parent;
6507                 err = btrfs_update_inode(trans, root, inode);
6508                 if (err)
6509                         goto fail;
6510                 if (inode->i_nlink == 1) {
6511                         /*
6512                          * If new hard link count is 1, it's a file created
6513                          * with open(2) O_TMPFILE flag.
6514                          */
6515                         err = btrfs_orphan_del(trans, inode);
6516                         if (err)
6517                                 goto fail;
6518                 }
6519                 d_instantiate(dentry, inode);
6520                 btrfs_log_new_name(trans, inode, NULL, parent);
6521         }
6522
6523         btrfs_end_transaction(trans, root);
6524         btrfs_balance_delayed_items(root);
6525 fail:
6526         if (drop_inode) {
6527                 inode_dec_link_count(inode);
6528                 iput(inode);
6529         }
6530         btrfs_btree_balance_dirty(root);
6531         return err;
6532 }
6533
6534 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
6535 {
6536         struct inode *inode = NULL;
6537         struct btrfs_trans_handle *trans;
6538         struct btrfs_root *root = BTRFS_I(dir)->root;
6539         int err = 0;
6540         int drop_on_err = 0;
6541         u64 objectid = 0;
6542         u64 index = 0;
6543
6544         /*
6545          * 2 items for inode and ref
6546          * 2 items for dir items
6547          * 1 for xattr if selinux is on
6548          */
6549         trans = btrfs_start_transaction(root, 5);
6550         if (IS_ERR(trans))
6551                 return PTR_ERR(trans);
6552
6553         err = btrfs_find_free_ino(root, &objectid);
6554         if (err)
6555                 goto out_fail;
6556
6557         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6558                                 dentry->d_name.len, btrfs_ino(dir), objectid,
6559                                 S_IFDIR | mode, &index);
6560         if (IS_ERR(inode)) {
6561                 err = PTR_ERR(inode);
6562                 goto out_fail;
6563         }
6564
6565         drop_on_err = 1;
6566         /* these must be set before we unlock the inode */
6567         inode->i_op = &btrfs_dir_inode_operations;
6568         inode->i_fop = &btrfs_dir_file_operations;
6569
6570         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6571         if (err)
6572                 goto out_fail_inode;
6573
6574         btrfs_i_size_write(inode, 0);
6575         err = btrfs_update_inode(trans, root, inode);
6576         if (err)
6577                 goto out_fail_inode;
6578
6579         err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
6580                              dentry->d_name.len, 0, index);
6581         if (err)
6582                 goto out_fail_inode;
6583
6584         d_instantiate(dentry, inode);
6585         /*
6586          * mkdir is special.  We're unlocking after we call d_instantiate
6587          * to avoid a race with nfsd calling d_instantiate.
6588          */
6589         unlock_new_inode(inode);
6590         drop_on_err = 0;
6591
6592 out_fail:
6593         btrfs_end_transaction(trans, root);
6594         if (drop_on_err) {
6595                 inode_dec_link_count(inode);
6596                 iput(inode);
6597         }
6598         btrfs_balance_delayed_items(root);
6599         btrfs_btree_balance_dirty(root);
6600         return err;
6601
6602 out_fail_inode:
6603         unlock_new_inode(inode);
6604         goto out_fail;
6605 }
6606
6607 /* Find the next extent map after a given extent map; the caller must hold the extent tree lock */
6608 static struct extent_map *next_extent_map(struct extent_map *em)
6609 {
6610         struct rb_node *next;
6611
6612         next = rb_next(&em->rb_node);
6613         if (!next)
6614                 return NULL;
6615         return container_of(next, struct extent_map, rb_node);
6616 }
6617
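/* Likewise, find the previous extent map; the same locking rules apply */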
6618 static struct extent_map *prev_extent_map(struct extent_map *em)
6619 {
6620         struct rb_node *prev;
6621
6622         prev = rb_prev(&em->rb_node);
6623         if (!prev)
6624                 return NULL;
6625         return container_of(prev, struct extent_map, rb_node);
6626 }
6627
6628 /* helper for btrfs_get_extent.  Given an existing extent in the tree
6629  * (the nearest extent to map_start) and an extent that we want to
6630  * insert, deal with the overlap and insert the best-fitting trimmed
6631  * extent into the tree.
6632  */
6633 static int merge_extent_mapping(struct extent_map_tree *em_tree,
6634                                 struct extent_map *existing,
6635                                 struct extent_map *em,
6636                                 u64 map_start)
6637 {
6638         struct extent_map *prev;
6639         struct extent_map *next;
6640         u64 start;
6641         u64 end;
6642         u64 start_diff;
6643
6644         BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
6645
6646         if (existing->start > map_start) {
6647                 next = existing;
6648                 prev = prev_extent_map(next);
6649         } else {
6650                 prev = existing;
6651                 next = next_extent_map(prev);
6652         }
6653
6654         start = prev ? extent_map_end(prev) : em->start;
6655         start = max_t(u64, start, em->start);
6656         end = next ? next->start : extent_map_end(em);
6657         end = min_t(u64, end, extent_map_end(em));
6658         start_diff = start - em->start;
6659         em->start = start;
6660         em->len = end - start;
6661         if (em->block_start < EXTENT_MAP_LAST_BYTE &&
6662             !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
6663                 em->block_start += start_diff;
6664                 em->block_len -= start_diff;
6665         }
6666         return add_extent_mapping(em_tree, em, 0);
6667 }
6668
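/*
 * Decompress an inline file extent directly into @page.  @extent_offset
 * is the offset into the decompressed data and the result is capped at
 * a single page.
 */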
6669 static noinline int uncompress_inline(struct btrfs_path *path,
6670                                       struct inode *inode, struct page *page,
6671                                       size_t pg_offset, u64 extent_offset,
6672                                       struct btrfs_file_extent_item *item)
6673 {
6674         int ret;
6675         struct extent_buffer *leaf = path->nodes[0];
6676         char *tmp;
6677         size_t max_size;
6678         unsigned long inline_size;
6679         unsigned long ptr;
6680         int compress_type;
6681
6682         WARN_ON(pg_offset != 0);
6683         compress_type = btrfs_file_extent_compression(leaf, item);
6684         max_size = btrfs_file_extent_ram_bytes(leaf, item);
6685         inline_size = btrfs_file_extent_inline_item_len(leaf,
6686                                         btrfs_item_nr(path->slots[0]));
6687         tmp = kmalloc(inline_size, GFP_NOFS);
6688         if (!tmp)
6689                 return -ENOMEM;
6690         ptr = btrfs_file_extent_inline_start(item);
6691
6692         read_extent_buffer(leaf, tmp, ptr, inline_size);
6693
6694         max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
6695         ret = btrfs_decompress(compress_type, tmp, page,
6696                                extent_offset, inline_size, max_size);
6697         kfree(tmp);
6698         return ret;
6699 }
6700
6701 /*
6702  * a bit scary, this does extent mapping from logical file offset to the disk.
6703  * the ugly parts come from merging extents from the disk with the in-ram
6704  * representation.  This gets more complex because of the data=ordered code,
6705  * where the in-ram extents might be locked pending data=ordered completion.
6706  *
6707  * This also copies inline extents directly into the page.
6708  */
6709
6710 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
6711                                     size_t pg_offset, u64 start, u64 len,
6712                                     int create)
6713 {
6714         int ret;
6715         int err = 0;
6716         u64 extent_start = 0;
6717         u64 extent_end = 0;
6718         u64 objectid = btrfs_ino(inode);
6719         u32 found_type;
6720         struct btrfs_path *path = NULL;
6721         struct btrfs_root *root = BTRFS_I(inode)->root;
6722         struct btrfs_file_extent_item *item;
6723         struct extent_buffer *leaf;
6724         struct btrfs_key found_key;
6725         struct extent_map *em = NULL;
6726         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
6727         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
6728         struct btrfs_trans_handle *trans = NULL;
6729         const bool new_inline = !page || create;
6730
6731 again:
6732         read_lock(&em_tree->lock);
6733         em = lookup_extent_mapping(em_tree, start, len);
6734         if (em)
6735                 em->bdev = root->fs_info->fs_devices->latest_bdev;
6736         read_unlock(&em_tree->lock);
6737
6738         if (em) {
6739                 if (em->start > start || em->start + em->len <= start)
6740                         free_extent_map(em);
6741                 else if (em->block_start == EXTENT_MAP_INLINE && page)
6742                         free_extent_map(em);
6743                 else
6744                         goto out;
6745         }
6746         em = alloc_extent_map();
6747         if (!em) {
6748                 err = -ENOMEM;
6749                 goto out;
6750         }
6751         em->bdev = root->fs_info->fs_devices->latest_bdev;
6752         em->start = EXTENT_MAP_HOLE;
6753         em->orig_start = EXTENT_MAP_HOLE;
6754         em->len = (u64)-1;
6755         em->block_len = (u64)-1;
6756
6757         if (!path) {
6758                 path = btrfs_alloc_path();
6759                 if (!path) {
6760                         err = -ENOMEM;
6761                         goto out;
6762                 }
6763                 /*
6764                  * Chances are we'll be called again, so go ahead and do
6765                  * readahead
6766                  */
6767                 path->reada = 1;
6768         }
6769
6770         ret = btrfs_lookup_file_extent(trans, root, path,
6771                                        objectid, start, trans != NULL);
6772         if (ret < 0) {
6773                 err = ret;
6774                 goto out;
6775         }
6776
6777         if (ret != 0) {
6778                 if (path->slots[0] == 0)
6779                         goto not_found;
6780                 path->slots[0]--;
6781         }
6782
6783         leaf = path->nodes[0];
6784         item = btrfs_item_ptr(leaf, path->slots[0],
6785                               struct btrfs_file_extent_item);
6786         /* are we inside the extent that was found? */
6787         btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6788         found_type = found_key.type;
6789         if (found_key.objectid != objectid ||
6790             found_type != BTRFS_EXTENT_DATA_KEY) {
6791                 /*
6792                  * If we back up past the first extent we want to move forward
6793                  * and see if there is an extent in front of us; otherwise we'll
6794                  * say there is a hole for our whole search range, which can
6795                  * cause problems.
6796                  */
6797                 extent_end = start;
6798                 goto next;
6799         }
6800
6801         found_type = btrfs_file_extent_type(leaf, item);
6802         extent_start = found_key.offset;
6803         if (found_type == BTRFS_FILE_EXTENT_REG ||
6804             found_type == BTRFS_FILE_EXTENT_PREALLOC) {
6805                 extent_end = extent_start +
6806                        btrfs_file_extent_num_bytes(leaf, item);
6807         } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
6808                 size_t size;
6809                 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
6810                 extent_end = ALIGN(extent_start + size, root->sectorsize);
6811         }
6812 next:
6813         if (start >= extent_end) {
6814                 path->slots[0]++;
6815                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
6816                         ret = btrfs_next_leaf(root, path);
6817                         if (ret < 0) {
6818                                 err = ret;
6819                                 goto out;
6820                         }
6821                         if (ret > 0)
6822                                 goto not_found;
6823                         leaf = path->nodes[0];
6824                 }
6825                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6826                 if (found_key.objectid != objectid ||
6827                     found_key.type != BTRFS_EXTENT_DATA_KEY)
6828                         goto not_found;
6829                 if (start + len <= found_key.offset)
6830                         goto not_found;
6831                 if (start > found_key.offset)
6832                         goto next;
6833                 em->start = start;
6834                 em->orig_start = start;
6835                 em->len = found_key.offset - start;
6836                 goto not_found_em;
6837         }
6838
6839         btrfs_extent_item_to_extent_map(inode, path, item, new_inline, em);
6840
6841         if (found_type == BTRFS_FILE_EXTENT_REG ||
6842             found_type == BTRFS_FILE_EXTENT_PREALLOC) {
6843                 goto insert;
6844         } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
6845                 unsigned long ptr;
6846                 char *map;
6847                 size_t size;
6848                 size_t extent_offset;
6849                 size_t copy_size;
6850
6851                 if (new_inline)
6852                         goto out;
6853
6854                 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
6855                 extent_offset = page_offset(page) + pg_offset - extent_start;
6856                 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
6857                                 size - extent_offset);
6858                 em->start = extent_start + extent_offset;
6859                 em->len = ALIGN(copy_size, root->sectorsize);
6860                 em->orig_block_len = em->len;
6861                 em->orig_start = em->start;
6862                 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
6863                 if (create == 0 && !PageUptodate(page)) {
6864                         if (btrfs_file_extent_compression(leaf, item) !=
6865                             BTRFS_COMPRESS_NONE) {
6866                                 ret = uncompress_inline(path, inode, page,
6867                                                         pg_offset,
6868                                                         extent_offset, item);
6869                                 if (ret) {
6870                                         err = ret;
6871                                         goto out;
6872                                 }
6873                         } else {
6874                                 map = kmap(page);
6875                                 read_extent_buffer(leaf, map + pg_offset, ptr,
6876                                                    copy_size);
6877                                 if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
6878                                         memset(map + pg_offset + copy_size, 0,
6879                                                PAGE_CACHE_SIZE - pg_offset -
6880                                                copy_size);
6881                                 }
6882                                 kunmap(page);
6883                         }
6884                         flush_dcache_page(page);
6885                 } else if (create && PageUptodate(page)) {
6886                         BUG();
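                        /*
                         * Editorial note: BUG() does not return, so the
                         * transaction-joining fallback below is dead code
                         * (note it even kunmap()s a page that was never
                         * kmap()ed in this path); it appears to be left
                         * over from an earlier version of this function.
                         */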
6887                         if (!trans) {
6888                                 kunmap(page);
6889                                 free_extent_map(em);
6890                                 em = NULL;
6891
6892                                 btrfs_release_path(path);
6893                                 trans = btrfs_join_transaction(root);
6894
6895                                 if (IS_ERR(trans))
6896                                         return ERR_CAST(trans);
6897                                 goto again;
6898                         }
6899                         map = kmap(page);
6900                         write_extent_buffer(leaf, map + pg_offset, ptr,
6901                                             copy_size);
6902                         kunmap(page);
6903                         btrfs_mark_buffer_dirty(leaf);
6904                 }
6905                 set_extent_uptodate(io_tree, em->start,
6906                                     extent_map_end(em) - 1, NULL, GFP_NOFS);
6907                 goto insert;
6908         }
6909 not_found:
6910         em->start = start;
6911         em->orig_start = start;
6912         em->len = len;
6913 not_found_em:
6914         em->block_start = EXTENT_MAP_HOLE;
6915         set_bit(EXTENT_FLAG_VACANCY, &em->flags);
6916 insert:
6917         btrfs_release_path(path);
6918         if (em->start > start || extent_map_end(em) <= start) {
6919                 btrfs_err(root->fs_info, "bad extent! em: [%llu %llu] passed [%llu %llu]",
6920                         em->start, em->len, start, len);
6921                 err = -EIO;
6922                 goto out;
6923         }
6924
6925         err = 0;
6926         write_lock(&em_tree->lock);
6927         ret = add_extent_mapping(em_tree, em, 0);
6928         /* it is possible that someone inserted the extent into the tree
6929          * while we had the lock dropped.  It is also possible that
6930          * an overlapping map exists in the tree
6931          */
6932         if (ret == -EEXIST) {
6933                 struct extent_map *existing;
6934
6935                 ret = 0;
6936
6937                 existing = search_extent_mapping(em_tree, start, len);
6938                 /*
6939                  * existing will always be non-NULL, since there must be
6940                  * an extent causing the -EEXIST.
6941                  */
6942                 if (start >= extent_map_end(existing) ||
6943                     start <= existing->start) {
6944                         /*
6945                          * The existing extent map is the one nearest to
6946                          * the [start, start + len) range which overlaps it
6947                          */
6948                         err = merge_extent_mapping(em_tree, existing,
6949                                                    em, start);
6950                         free_extent_map(existing);
6951                         if (err) {
6952                                 free_extent_map(em);
6953                                 em = NULL;
6954                         }
6955                 } else {
6956                         free_extent_map(em);
6957                         em = existing;
6958                         err = 0;
6959                 }
6960         }
6961         write_unlock(&em_tree->lock);
6962 out:
6963
6964         trace_btrfs_get_extent(root, em);
6965
6966         btrfs_free_path(path);
6967         if (trans) {
6968                 ret = btrfs_end_transaction(trans, root);
6969                 if (!err)
6970                         err = ret;
6971         }
6972         if (err) {
6973                 free_extent_map(em);
6974                 return ERR_PTR(err);
6975         }
6976         BUG_ON(!em); /* Error is always set */
6977         return em;
6978 }
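/*
 * Usage sketch (editorial, with hypothetical values): a readpage-style
 * caller mapping the first page of a file would do something like
 *
 *	em = btrfs_get_extent(inode, page, 0, 0, PAGE_CACHE_SIZE, 0);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	...
 *	free_extent_map(em);
 *
 * On success em is never NULL: a range with no extent item comes back as
 * a hole (block_start == EXTENT_MAP_HOLE) rather than as an error.
 */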
6979
6980 struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
6981                                            size_t pg_offset, u64 start, u64 len,
6982                                            int create)
6983 {
6984         struct extent_map *em;
6985         struct extent_map *hole_em = NULL;
6986         u64 range_start = start;
6987         u64 end;
6988         u64 found;
6989         u64 found_end;
6990         int err = 0;
6991
6992         em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
6993         if (IS_ERR(em))
6994                 return em;
6995         if (em) {
6996                 /*
6997                  * if our em maps to
6998                  * -  a hole or
6999                  * -  a pre-alloc extent,
7000                  * there might actually be delalloc bytes behind it.
7001                  */
7002                 if (em->block_start != EXTENT_MAP_HOLE &&
7003                     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7004                         return em;
7005                 else
7006                         hole_em = em;
7007         }
7008
7009         /* check to see if we've wrapped (len == -1 or similar) */
7010         end = start + len;
7011         if (end < start)
7012                 end = (u64)-1;
7013         else
7014                 end -= 1;
7015
7016         em = NULL;
7017
7018         /* ok, we didn't find anything, let's look for delalloc */
7019         found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
7020                                  end, len, EXTENT_DELALLOC, 1);
7021         found_end = range_start + found;
7022         if (found_end < range_start)
7023                 found_end = (u64)-1;
7024
7025         /*
7026          * we didn't find anything useful, return
7027          * the original results from get_extent()
7028          */
7029         if (range_start > end || found_end <= start) {
7030                 em = hole_em;
7031                 hole_em = NULL;
7032                 goto out;
7033         }
7034
7035         /* adjust the range_start to make sure it doesn't
7036          * go backwards past the start the caller passed in
7037          */
7038         range_start = max(start, range_start);
7039         found = found_end - range_start;
7040
7041         if (found > 0) {
7042                 u64 hole_start = start;
7043                 u64 hole_len = len;
7044
7045                 em = alloc_extent_map();
7046                 if (!em) {
7047                         err = -ENOMEM;
7048                         goto out;
7049                 }
7050                 /*
7051                  * when btrfs_get_extent can't find anything it
7052                  * returns one huge hole
7053                  *
7054                  * make sure what it found really fits our range, and
7055                  * adjust to make sure it is based on the start from
7056                  * the caller
7057                  */
7058                 if (hole_em) {
7059                         u64 calc_end = extent_map_end(hole_em);
7060
7061                         if (calc_end <= start || (hole_em->start > end)) {
7062                                 free_extent_map(hole_em);
7063                                 hole_em = NULL;
7064                         } else {
7065                                 hole_start = max(hole_em->start, start);
7066                                 hole_len = calc_end - hole_start;
7067                         }
7068                 }
7069                 em->bdev = NULL;
7070                 if (hole_em && range_start > hole_start) {
7071                         /* our hole starts before our delalloc, so we
7072                          * have to return just the parts of the hole
7073                          * that go until the delalloc starts
7074                          */
7075                         em->len = min(hole_len,
7076                                       range_start - hole_start);
7077                         em->start = hole_start;
7078                         em->orig_start = hole_start;
7079                         /*
7080                          * don't adjust block start at all,
7081                          * it is fixed at EXTENT_MAP_HOLE
7082                          */
7083                         em->block_start = hole_em->block_start;
7084                         em->block_len = hole_len;
7085                         if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
7086                                 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
7087                 } else {
7088                         em->start = range_start;
7089                         em->len = found;
7090                         em->orig_start = range_start;
7091                         em->block_start = EXTENT_MAP_DELALLOC;
7092                         em->block_len = found;
7093                 }
7094         } else if (hole_em) {
7095                 return hole_em;
7096         }
7097 out:
7098
7099         free_extent_map(hole_em);
7100         if (err) {
7101                 free_extent_map(em);
7102                 return ERR_PTR(err);
7103         }
7104         return em;
7105 }
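/*
 * Editorial example: with a hole at [0, 1M) and 4K of buffered delalloc
 * at 64K, asking this wrapper for offset 0 returns just the leading
 * piece of the hole, [0, 64K), while asking at 64K returns a delalloc
 * mapping (block_start == EXTENT_MAP_DELALLOC) covering [64K, 68K).
 * Plain btrfs_get_extent() would report the whole range as one hole.
 */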
7106
7107 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
7108                                                   u64 start, u64 len)
7109 {
7110         struct btrfs_root *root = BTRFS_I(inode)->root;
7111         struct extent_map *em;
7112         struct btrfs_key ins;
7113         u64 alloc_hint;
7114         int ret;
7115
7116         alloc_hint = get_extent_allocation_hint(inode, start, len);
7117         ret = btrfs_reserve_extent(root, len, root->sectorsize, 0,
7118                                    alloc_hint, &ins, 1, 1);
7119         if (ret)
7120                 return ERR_PTR(ret);
7121
7122         em = create_pinned_em(inode, start, ins.offset, start, ins.objectid,
7123                               ins.offset, ins.offset, ins.offset, 0);
7124         if (IS_ERR(em)) {
7125                 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
7126                 return em;
7127         }
7128
7129         ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
7130                                            ins.offset, ins.offset, 0);
7131         if (ret) {
7132                 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
7133                 free_extent_map(em);
7134                 return ERR_PTR(ret);
7135         }
7136
7137         return em;
7138 }
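/*
 * Editorial note: this is the COW allocation path for direct IO writes.
 * The reserved extent is published in two places that must stay in
 * sync: a pinned extent_map (so concurrent lookups see the new mapping)
 * and an ordered extent (so the file extent item and csums are created
 * at IO completion); if creating the ordered extent fails, both the map
 * and the reservation are torn down again.
 */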
7139
7140 /*
7141  * returns 1 when the nocow is safe, < 0 on error, 0 if the
7142  * block must be cow'd
7143  */
7144 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
7145                               u64 *orig_start, u64 *orig_block_len,
7146                               u64 *ram_bytes)
7147 {
7148         struct btrfs_trans_handle *trans;
7149         struct btrfs_path *path;
7150         int ret;
7151         struct extent_buffer *leaf;
7152         struct btrfs_root *root = BTRFS_I(inode)->root;
7153         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7154         struct btrfs_file_extent_item *fi;
7155         struct btrfs_key key;
7156         u64 disk_bytenr;
7157         u64 backref_offset;
7158         u64 extent_end;
7159         u64 num_bytes;
7160         int slot;
7161         int found_type;
7162         bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW);
7163
7164         path = btrfs_alloc_path();
7165         if (!path)
7166                 return -ENOMEM;
7167
7168         ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
7169                                        offset, 0);
7170         if (ret < 0)
7171                 goto out;
7172
7173         slot = path->slots[0];
7174         if (ret == 1) {
7175                 if (slot == 0) {
7176                         /* can't find the item, must cow */
7177                         ret = 0;
7178                         goto out;
7179                 }
7180                 slot--;
7181         }
7182         ret = 0;
7183         leaf = path->nodes[0];
7184         btrfs_item_key_to_cpu(leaf, &key, slot);
7185         if (key.objectid != btrfs_ino(inode) ||
7186             key.type != BTRFS_EXTENT_DATA_KEY) {
7187                 /* not our file or wrong item type, must cow */
7188                 goto out;
7189         }
7190
7191         if (key.offset > offset) {
7192                 /* Wrong offset, must cow */
7193                 goto out;
7194         }
7195
7196         fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
7197         found_type = btrfs_file_extent_type(leaf, fi);
7198         if (found_type != BTRFS_FILE_EXTENT_REG &&
7199             found_type != BTRFS_FILE_EXTENT_PREALLOC) {
7200                 /* not a regular extent, must cow */
7201                 goto out;
7202         }
7203
7204         if (!nocow && found_type == BTRFS_FILE_EXTENT_REG)
7205                 goto out;
7206
7207         extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
7208         if (extent_end <= offset)
7209                 goto out;
7210
7211         disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
7212         if (disk_bytenr == 0)
7213                 goto out;
7214
7215         if (btrfs_file_extent_compression(leaf, fi) ||
7216             btrfs_file_extent_encryption(leaf, fi) ||
7217             btrfs_file_extent_other_encoding(leaf, fi))
7218                 goto out;
7219
7220         backref_offset = btrfs_file_extent_offset(leaf, fi);
7221
7222         if (orig_start) {
7223                 *orig_start = key.offset - backref_offset;
7224                 *orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
7225                 *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
7226         }
7227
7228         if (btrfs_extent_readonly(root, disk_bytenr))
7229                 goto out;
7230
7231         num_bytes = min(offset + *len, extent_end) - offset;
7232         if (!nocow && found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7233                 u64 range_end;
7234
7235                 range_end = round_up(offset + num_bytes, root->sectorsize) - 1;
7236                 ret = test_range_bit(io_tree, offset, range_end,
7237                                      EXTENT_DELALLOC, 0, NULL);
7238                 if (ret) {
7239                         ret = -EAGAIN;
7240                         goto out;
7241                 }
7242         }
7243
7244         btrfs_release_path(path);
7245
7246         /*
7247          * look for other files referencing this extent, if we
7248          * find any we must cow
7249          */
7250         trans = btrfs_join_transaction(root);
7251         if (IS_ERR(trans)) {
7252                 ret = 0;
7253                 goto out;
7254         }
7255
7256         ret = btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
7257                                     key.offset - backref_offset, disk_bytenr);
7258         btrfs_end_transaction(trans, root);
7259         if (ret) {
7260                 ret = 0;
7261                 goto out;
7262         }
7263
7264         /*
7265          * adjust disk_bytenr and num_bytes to cover just the bytes
7266          * in this extent we are about to write.  If there
7267          * are any csums in that range we have to cow in order
7268          * to keep the csums correct
7269          */
7270         disk_bytenr += backref_offset;
7271         disk_bytenr += offset - key.offset;
7272         if (csum_exist_in_range(root, disk_bytenr, num_bytes))
7273                 goto out;
7274         /*
7275          * all of the above have passed, it is safe to overwrite this extent
7276          * without cow
7277          */
7278         *len = num_bytes;
7279         ret = 1;
7280 out:
7281         btrfs_free_path(path);
7282         return ret;
7283 }
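/*
 * Editorial summary of the checks above: nocow is only allowed when the
 * extent item is REG/PREALLOC, points at real (non-hole) disk space, is
 * not compressed/encrypted/otherwise encoded, does not live in a
 * readonly block group, is not shared with other files or snapshots,
 * and has no csums in the range being overwritten.  Failing any of
 * these falls through to ret == 0, i.e. "must cow".
 */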
7284
7285 bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
7286 {
7287         struct radix_tree_root *root = &inode->i_mapping->page_tree;
7288         int found = false;
7289         void **pagep = NULL;
7290         struct page *page = NULL;
7291         int start_idx;
7292         int end_idx;
7293
7294         start_idx = start >> PAGE_CACHE_SHIFT;
7295
7296         /*
7297          * end is the last byte in the last page.  end == start is legal
7298          */
7299         end_idx = end >> PAGE_CACHE_SHIFT;
7300
7301         rcu_read_lock();
7302
7303         /* Most of the code in this while loop is lifted from
7304          * find_get_page.  It's been modified to begin searching from a
7305          * page and return just the first page found in that range.  If the
7306          * found idx is less than or equal to the end idx then we know that
7307          * a page exists.  If no pages are found or if those pages are
7308          * outside of the range then we're fine (yay!) */
7309         while (page == NULL &&
7310                radix_tree_gang_lookup_slot(root, &pagep, NULL, start_idx, 1)) {
7311                 page = radix_tree_deref_slot(pagep);
7312                 if (unlikely(!page))
7313                         break;
7314
7315                 if (radix_tree_exception(page)) {
7316                         if (radix_tree_deref_retry(page)) {
7317                                 page = NULL;
7318                                 continue;
7319                         }
7320                         /*
7321                          * Otherwise, shmem/tmpfs must be storing a swap entry
7322                          * here as an exceptional entry: so return it without
7323                          * attempting to raise page count.
7324                          */
7325                         page = NULL;
7326                         break; /* TODO: Is this relevant for this use case? */
7327                 }
7328
7329                 if (!page_cache_get_speculative(page)) {
7330                         page = NULL;
7331                         continue;
7332                 }
7333
7334                 /*
7335                  * Has the page moved?
7336                  * This is part of the lockless pagecache protocol. See
7337                  * include/linux/pagemap.h for details.
7338                  */
7339                 if (unlikely(page != *pagep)) {
7340                         page_cache_release(page);
7341                         page = NULL;
7342                 }
7343         }
7344
7345         if (page) {
7346                 if (page->index <= end_idx)
7347                         found = true;
7348                 page_cache_release(page);
7349         }
7350
7351         rcu_read_unlock();
7352         return found;
7353 }
7354
7355 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
7356                               struct extent_state **cached_state, int writing)
7357 {
7358         struct btrfs_ordered_extent *ordered;
7359         int ret = 0;
7360
7361         while (1) {
7362                 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7363                                  0, cached_state);
7364                 /*
7365                  * We're concerned with the entire range that we're going to be
7366                  * doing DIO to, so we need to make sure there are no ordered
7367                  * extents in this range.
7368                  */
7369                 ordered = btrfs_lookup_ordered_range(inode, lockstart,
7370                                                      lockend - lockstart + 1);
7371
7372                 /*
7373                  * We need to make sure there are no buffered pages in this
7374                  * range either, we could have raced between the invalidate in
7375                  * generic_file_direct_write and locking the extent.  The
7376                  * invalidate needs to happen so that reads after a write do not
7377                  * get stale data.
7378                  */
7379                 if (!ordered &&
7380                     (!writing ||
7381                      !btrfs_page_exists_in_range(inode, lockstart, lockend)))
7382                         break;
7383
7384                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7385                                      cached_state, GFP_NOFS);
7386
7387                 if (ordered) {
7388                         btrfs_start_ordered_extent(inode, ordered, 1);
7389                         btrfs_put_ordered_extent(ordered);
7390                 } else {
7391                         /* Screw you mmap */
7392                         ret = btrfs_fdatawrite_range(inode, lockstart, lockend);
7393                         if (ret)
7394                                 break;
7395                         ret = filemap_fdatawait_range(inode->i_mapping,
7396                                                       lockstart,
7397                                                       lockend);
7398                         if (ret)
7399                                 break;
7400
7401                         /*
7402                          * If we found a page that couldn't be invalidated just
7403                          * fall back to buffered.
7404                          */
7405                         ret = invalidate_inode_pages2_range(inode->i_mapping,
7406                                         lockstart >> PAGE_CACHE_SHIFT,
7407                                         lockend >> PAGE_CACHE_SHIFT);
7408                         if (ret)
7409                                 break;
7410                 }
7411
7412                 cond_resched();
7413         }
7414
7415         return ret;
7416 }
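/*
 * Editorial note: the loop above retries until the range is locked with
 * no ordered extent and (for writes) no stale buffered pages in the
 * way; it either waits out an existing ordered extent or flushes and
 * invalidates the pagecache itself.  A nonzero return means the
 * invalidation failed and the caller has to fall back to buffered IO.
 */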
7417
7418 static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
7419                                            u64 len, u64 orig_start,
7420                                            u64 block_start, u64 block_len,
7421                                            u64 orig_block_len, u64 ram_bytes,
7422                                            int type)
7423 {
7424         struct extent_map_tree *em_tree;
7425         struct extent_map *em;
7426         struct btrfs_root *root = BTRFS_I(inode)->root;
7427         int ret;
7428
7429         em_tree = &BTRFS_I(inode)->extent_tree;
7430         em = alloc_extent_map();
7431         if (!em)
7432                 return ERR_PTR(-ENOMEM);
7433
7434         em->start = start;
7435         em->orig_start = orig_start;
7436         em->mod_start = start;
7437         em->mod_len = len;
7438         em->len = len;
7439         em->block_len = block_len;
7440         em->block_start = block_start;
7441         em->bdev = root->fs_info->fs_devices->latest_bdev;
7442         em->orig_block_len = orig_block_len;
7443         em->ram_bytes = ram_bytes;
7444         em->generation = -1;
7445         set_bit(EXTENT_FLAG_PINNED, &em->flags);
7446         if (type == BTRFS_ORDERED_PREALLOC)
7447                 set_bit(EXTENT_FLAG_FILLING, &em->flags);
7448
7449         do {
7450                 btrfs_drop_extent_cache(inode, em->start,
7451                                 em->start + em->len - 1, 0);
7452                 write_lock(&em_tree->lock);
7453                 ret = add_extent_mapping(em_tree, em, 1);
7454                 write_unlock(&em_tree->lock);
7455         } while (ret == -EEXIST);
7456
7457         if (ret) {
7458                 free_extent_map(em);
7459                 return ERR_PTR(ret);
7460         }
7461
7462         return em;
7463 }
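/*
 * Editorial note: EXTENT_FLAG_PINNED marks this mapping as backed by
 * in-flight IO so it is not merged or evicted until the corresponding
 * ordered extent completes and unpins it, which is why the loop above
 * can keep dropping whatever cached extents collide with it until
 * add_extent_mapping() succeeds.
 */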
7464
7465 struct btrfs_dio_data {
7466         u64 outstanding_extents;
7467         u64 reserve;
7468 };
7469
7470 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
7471                                    struct buffer_head *bh_result, int create)
7472 {
7473         struct extent_map *em;
7474         struct btrfs_root *root = BTRFS_I(inode)->root;
7475         struct extent_state *cached_state = NULL;
7476         struct btrfs_dio_data *dio_data = NULL;
7477         u64 start = iblock << inode->i_blkbits;
7478         u64 lockstart, lockend;
7479         u64 len = bh_result->b_size;
7480         int unlock_bits = EXTENT_LOCKED;
7481         int ret = 0;
7482
7483         if (create)
7484                 unlock_bits |= EXTENT_DIRTY;
7485         else
7486                 len = min_t(u64, len, root->sectorsize);
7487
7488         lockstart = start;
7489         lockend = start + len - 1;
7490
7491         if (current->journal_info) {
7492                 /*
7493                  * Need to pull our outstanding extents and set journal_info to NULL so
7494                  * that anything that needs to check if there's a transaction doesn't get
7495                  * confused.
7496                  */
7497                 dio_data = current->journal_info;
7498                 current->journal_info = NULL;
7499         }
7500
7501         /*
7502          * If this errors out it's because we couldn't invalidate pagecache for
7503          * this range and we need to fallback to buffered.
7504          */
7505         if (lock_extent_direct(inode, lockstart, lockend, &cached_state, create))
7506                 return -ENOTBLK;
7507
7508         em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
7509         if (IS_ERR(em)) {
7510                 ret = PTR_ERR(em);
7511                 goto unlock_err;
7512         }
7513
7514         /*
7515          * Ok, for INLINE and COMPRESSED extents we need to fall back on
7516          * buffered io.  INLINE is special, and we could probably kludge it
7517          * in here, but it's still buffered so for safety let's just fall
7518          * back to the generic buffered path.
7519          *
7520          * For COMPRESSED we _have_ to read the entire extent in so we can
7521          * decompress it, so there will be buffering required no matter what
7522          * we do, so go ahead and fall back to buffered.
7523          *
7524          * We return -ENOTBLK because that's what makes DIO go ahead and go
7525          * back to buffered IO.  Don't blame me, this is the price we pay for
7526          * using the generic code.
7527          */
7528         if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
7529             em->block_start == EXTENT_MAP_INLINE) {
7530                 free_extent_map(em);
7531                 ret = -ENOTBLK;
7532                 goto unlock_err;
7533         }
7534
7535         /* Just a good old-fashioned hole, return */
7536         if (!create && (em->block_start == EXTENT_MAP_HOLE ||
7537                         test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
7538                 free_extent_map(em);
7539                 goto unlock_err;
7540         }
7541
7542         /*
7543          * We don't allocate a new extent in the following cases
7544          *
7545          * 1) The inode is marked as NODATACOW.  In this case we'll just use the
7546          * existing extent.
7547          * 2) The extent is marked as PREALLOC.  We're good to go here and can
7548          * just use the extent.
7549          *
7550          */
7551         if (!create) {
7552                 len = min(len, em->len - (start - em->start));
7553                 lockstart = start + len;
7554                 goto unlock;
7555         }
7556
7557         if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
7558             ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
7559              em->block_start != EXTENT_MAP_HOLE)) {
7560                 int type;
7561                 u64 block_start, orig_start, orig_block_len, ram_bytes;
7562
7563                 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7564                         type = BTRFS_ORDERED_PREALLOC;
7565                 else
7566                         type = BTRFS_ORDERED_NOCOW;
7567                 len = min(len, em->len - (start - em->start));
7568                 block_start = em->block_start + (start - em->start);
7569
7570                 if (can_nocow_extent(inode, start, &len, &orig_start,
7571                                      &orig_block_len, &ram_bytes) == 1) {
7572                         if (type == BTRFS_ORDERED_PREALLOC) {
7573                                 free_extent_map(em);
7574                                 em = create_pinned_em(inode, start, len,
7575                                                        orig_start,
7576                                                        block_start, len,
7577                                                        orig_block_len,
7578                                                        ram_bytes, type);
7579                                 if (IS_ERR(em)) {
7580                                         ret = PTR_ERR(em);
7581                                         goto unlock_err;
7582                                 }
7583                         }
7584
7585                         ret = btrfs_add_ordered_extent_dio(inode, start,
7586                                            block_start, len, len, type);
7587                         if (ret) {
7588                                 free_extent_map(em);
7589                                 goto unlock_err;
7590                         }
7591                         goto unlock;
7592                 }
7593         }
7594
7595         /*
7596          * this will cow the extent, reset the len in case we changed
7597          * it above
7598          */
7599         len = bh_result->b_size;
7600         free_extent_map(em);
7601         em = btrfs_new_extent_direct(inode, start, len);
7602         if (IS_ERR(em)) {
7603                 ret = PTR_ERR(em);
7604                 goto unlock_err;
7605         }
7606         len = min(len, em->len - (start - em->start));
7607 unlock:
7608         bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
7609                 inode->i_blkbits;
7610         bh_result->b_size = len;
7611         bh_result->b_bdev = em->bdev;
7612         set_buffer_mapped(bh_result);
7613         if (create) {
7614                 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7615                         set_buffer_new(bh_result);
7616
7617                 /*
7618                  * Need to update the i_size under the extent lock so buffered
7619                  * readers will get the updated i_size when we unlock.
7620                  */
7621                 if (start + len > i_size_read(inode))
7622                         i_size_write(inode, start + len);
7623
7624                 /*
7625                  * If we have an outstanding_extents count still set then we're
7626                  * within our reservation, otherwise we need to adjust our inode
7627                  * counter appropriately.
7628                  */
7629                 if (dio_data->outstanding_extents) {
7630                         (dio_data->outstanding_extents)--;
7631                 } else {
7632                         spin_lock(&BTRFS_I(inode)->lock);
7633                         BTRFS_I(inode)->outstanding_extents++;
7634                         spin_unlock(&BTRFS_I(inode)->lock);
7635                 }
7636
7637                 btrfs_free_reserved_data_space(inode, len);
7638                 WARN_ON(dio_data->reserve < len);
7639                 dio_data->reserve -= len;
7640                 current->journal_info = dio_data;
7641         }
7642
7643         /*
7644          * In the case of write we need to clear and unlock the entire range,
7645          * in the case of read we need to unlock only the end area that we
7646          * aren't using if there is any left over space.
7647          */
7648         if (lockstart < lockend) {
7649                 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
7650                                  lockend, unlock_bits, 1, 0,
7651                                  &cached_state, GFP_NOFS);
7652         } else {
7653                 free_extent_state(cached_state);
7654         }
7655
7656         free_extent_map(em);
7657
7658         return 0;
7659
7660 unlock_err:
7661         clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7662                          unlock_bits, 1, 0, &cached_state, GFP_NOFS);
7663         if (dio_data)
7664                 current->journal_info = dio_data;
7665         return ret;
7666 }
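/*
 * Editorial note: this is the get_block_t-style callback handed to the
 * generic direct IO code; the mapping decision (reuse via nocow,
 * preallocated extent, or a fresh COW allocation) is reported back
 * purely through bh_result.  For writes the whole extent range is
 * unlocked before returning and ordering is protected by the ordered
 * extent instead; for reads only the unused tail is unlocked here and
 * the mapped part stays locked until the read endio runs.
 */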
7667
7668 static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio,
7669                                         int rw, int mirror_num)
7670 {
7671         struct btrfs_root *root = BTRFS_I(inode)->root;
7672         int ret;
7673
7674         BUG_ON(rw & REQ_WRITE);
7675
7676         bio_get(bio);
7677
7678         ret = btrfs_bio_wq_end_io(root->fs_info, bio,
7679                                   BTRFS_WQ_ENDIO_DIO_REPAIR);
7680         if (ret)
7681                 goto err;
7682
7683         ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
7684 err:
7685         bio_put(bio);
7686         return ret;
7687 }
7688
7689 static int btrfs_check_dio_repairable(struct inode *inode,
7690                                       struct bio *failed_bio,
7691                                       struct io_failure_record *failrec,
7692                                       int failed_mirror)
7693 {
7694         int num_copies;
7695
7696         num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
7697                                       failrec->logical, failrec->len);
7698         if (num_copies == 1) {
7699                 /*
7700                  * we only have a single copy of the data, so don't bother with
7701                  * all the retry and error correction code that follows. no
7702                  * matter what the error is, it is very likely to persist.
7703                  */
7704                 pr_debug("Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
7705                          num_copies, failrec->this_mirror, failed_mirror);
7706                 return 0;
7707         }
7708
7709         failrec->failed_mirror = failed_mirror;
7710         failrec->this_mirror++;
7711         if (failrec->this_mirror == failed_mirror)
7712                 failrec->this_mirror++;
7713
7714         if (failrec->this_mirror > num_copies) {
7715                 pr_debug("Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
7716                          num_copies, failrec->this_mirror, failed_mirror);
7717                 return 0;
7718         }
7719
7720         return 1;
7721 }
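/*
 * Editorial example: on the first retry of a fresh failure record with
 * num_copies == 2 and failed_mirror == 1, this_mirror is bumped from 0
 * to 1, matches failed_mirror and is bumped again to 2, and since
 * 2 <= num_copies the repair read is sent to mirror 2.  Once
 * this_mirror would exceed num_copies every copy has been tried and the
 * error is final.
 */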
7722
7723 static int dio_read_error(struct inode *inode, struct bio *failed_bio,
7724                           struct page *page, u64 start, u64 end,
7725                           int failed_mirror, bio_end_io_t *repair_endio,
7726                           void *repair_arg)
7727 {
7728         struct io_failure_record *failrec;
7729         struct bio *bio;
7730         int isector;
7731         int read_mode;
7732         int ret;
7733
7734         BUG_ON(failed_bio->bi_rw & REQ_WRITE);
7735
7736         ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
7737         if (ret)
7738                 return ret;
7739
7740         ret = btrfs_check_dio_repairable(inode, failed_bio, failrec,
7741                                          failed_mirror);
7742         if (!ret) {
7743                 free_io_failure(inode, failrec);
7744                 return -EIO;
7745         }
7746
7747         if (failed_bio->bi_vcnt > 1)
7748                 read_mode = READ_SYNC | REQ_FAILFAST_DEV;
7749         else
7750                 read_mode = READ_SYNC;
7751
7752         isector = start - btrfs_io_bio(failed_bio)->logical;
7753         isector >>= inode->i_sb->s_blocksize_bits;
7754         bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
7755                                       0, isector, repair_endio, repair_arg);
7756         if (!bio) {
7757                 free_io_failure(inode, failrec);
7758                 return -EIO;
7759         }
7760
7761         btrfs_debug(BTRFS_I(inode)->root->fs_info,
7762                     "Repair DIO Read Error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d\n",
7763                     read_mode, failrec->this_mirror, failrec->in_validation);
7764
7765         ret = submit_dio_repair_bio(inode, bio, read_mode,
7766                                     failrec->this_mirror);
7767         if (ret) {
7768                 free_io_failure(inode, failrec);
7769                 bio_put(bio);
7770         }
7771
7772         return ret;
7773 }
7774
7775 struct btrfs_retry_complete {
7776         struct completion done;
7777         struct inode *inode;
7778         u64 start;
7779         int uptodate;
7780 };
7781
7782 static void btrfs_retry_endio_nocsum(struct bio *bio)
7783 {
7784         struct btrfs_retry_complete *done = bio->bi_private;
7785         struct bio_vec *bvec;
7786         int i;
7787
7788         if (bio->bi_error)
7789                 goto end;
7790
7791         done->uptodate = 1;
7792         bio_for_each_segment_all(bvec, bio, i)
7793                 clean_io_failure(done->inode, done->start, bvec->bv_page, 0);
7794 end:
7795         complete(&done->done);
7796         bio_put(bio);
7797 }
7798
7799 static int __btrfs_correct_data_nocsum(struct inode *inode,
7800                                        struct btrfs_io_bio *io_bio)
7801 {
7802         struct bio_vec *bvec;
7803         struct btrfs_retry_complete done;
7804         u64 start;
7805         int i;
7806         int ret;
7807
7808         start = io_bio->logical;
7809         done.inode = inode;
7810
7811         bio_for_each_segment_all(bvec, &io_bio->bio, i) {
7812 try_again:
7813                 done.uptodate = 0;
7814                 done.start = start;
7815                 init_completion(&done.done);
7816
7817                 ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start,
7818                                      start + bvec->bv_len - 1,
7819                                      io_bio->mirror_num,
7820                                      btrfs_retry_endio_nocsum, &done);
7821                 if (ret)
7822                         return ret;
7823
7824                 wait_for_completion(&done.done);
7825
7826                 if (!done.uptodate) {
7827                         /* We might have another mirror, so try again */
7828                         goto try_again;
7829                 }
7830
7831                 start += bvec->bv_len;
7832         }
7833
7834         return 0;
7835 }
7836
7837 static void btrfs_retry_endio(struct bio *bio)
7838 {
7839         struct btrfs_retry_complete *done = bio->bi_private;
7840         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
7841         struct bio_vec *bvec;
7842         int uptodate;
7843         int ret;
7844         int i;
7845
7846         if (bio->bi_error)
7847                 goto end;
7848
7849         uptodate = 1;
7850         bio_for_each_segment_all(bvec, bio, i) {
7851                 ret = __readpage_endio_check(done->inode, io_bio, i,
7852                                              bvec->bv_page, 0,
7853                                              done->start, bvec->bv_len);
7854                 if (!ret)
7855                         clean_io_failure(done->inode, done->start,
7856                                          bvec->bv_page, 0);
7857                 else
7858                         uptodate = 0;
7859         }
7860
7861         done->uptodate = uptodate;
7862 end:
7863         complete(&done->done);
7864         bio_put(bio);
7865 }
7866
7867 static int __btrfs_subio_endio_read(struct inode *inode,
7868                                     struct btrfs_io_bio *io_bio, int err)
7869 {
7870         struct bio_vec *bvec;
7871         struct btrfs_retry_complete done;
7872         u64 start;
7873         u64 offset = 0;
7874         int i;
7875         int ret;
7876
7877         err = 0;
7878         start = io_bio->logical;
7879         done.inode = inode;
7880
7881         bio_for_each_segment_all(bvec, &io_bio->bio, i) {
7882                 ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page,
7883                                              0, start, bvec->bv_len);
7884                 if (likely(!ret))
7885                         goto next;
7886 try_again:
7887                 done.uptodate = 0;
7888                 done.start = start;
7889                 init_completion(&done.done);
7890
7891                 ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start,
7892                                      start + bvec->bv_len - 1,
7893                                      io_bio->mirror_num,
7894                                      btrfs_retry_endio, &done);
7895                 if (ret) {
7896                         err = ret;
7897                         goto next;
7898                 }
7899
7900                 wait_for_completion(&done.done);
7901
7902                 if (!done.uptodate) {
7903                         /* We might have another mirror, so try again */
7904                         goto try_again;
7905                 }
7906 next:
7907                 offset += bvec->bv_len;
7908                 start += bvec->bv_len;
7909         }
7910
7911         return err;
7912 }
7913
7914 static int btrfs_subio_endio_read(struct inode *inode,
7915                                   struct btrfs_io_bio *io_bio, int err)
7916 {
7917         bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
7918
7919         if (skip_csum) {
7920                 if (unlikely(err))
7921                         return __btrfs_correct_data_nocsum(inode, io_bio);
7922                 else
7923                         return 0;
7924         } else {
7925                 return __btrfs_subio_endio_read(inode, io_bio, err);
7926         }
7927 }
7928
7929 static void btrfs_endio_direct_read(struct bio *bio)
7930 {
7931         struct btrfs_dio_private *dip = bio->bi_private;
7932         struct inode *inode = dip->inode;
7933         struct bio *dio_bio;
7934         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
7935         int err = bio->bi_error;
7936
7937         if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED)
7938                 err = btrfs_subio_endio_read(inode, io_bio, err);
7939
7940         unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
7941                       dip->logical_offset + dip->bytes - 1);
7942         dio_bio = dip->dio_bio;
7943
7944         kfree(dip);
7945
7946         dio_end_io(dio_bio, bio->bi_error);
7947
7948         if (io_bio->end_io)
7949                 io_bio->end_io(io_bio, err);
7950         bio_put(bio);
7951 }
7952
7953 static void btrfs_endio_direct_write(struct bio *bio)
7954 {
7955         struct btrfs_dio_private *dip = bio->bi_private;
7956         struct inode *inode = dip->inode;
7957         struct btrfs_root *root = BTRFS_I(inode)->root;
7958         struct btrfs_ordered_extent *ordered = NULL;
7959         u64 ordered_offset = dip->logical_offset;
7960         u64 ordered_bytes = dip->bytes;
7961         struct bio *dio_bio;
7962         int ret;
7963
7964 again:
7965         ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
7966                                                    &ordered_offset,
7967                                                    ordered_bytes,
7968                                                    !bio->bi_error);
7969         if (!ret)
7970                 goto out_test;
7971
7972         btrfs_init_work(&ordered->work, btrfs_endio_write_helper,
7973                         finish_ordered_fn, NULL, NULL);
7974         btrfs_queue_work(root->fs_info->endio_write_workers,
7975                          &ordered->work);
7976 out_test:
7977         /*
7978          * our bio might span multiple ordered extents.  If we haven't
7979          * completed the accounting for the whole dio, go back and try again
7980          */
7981         if (ordered_offset < dip->logical_offset + dip->bytes) {
7982                 ordered_bytes = dip->logical_offset + dip->bytes -
7983                         ordered_offset;
7984                 ordered = NULL;
7985                 goto again;
7986         }
7987         dio_bio = dip->dio_bio;
7988
7989         kfree(dip);
7990
7991         dio_end_io(dio_bio, bio->bi_error);
7992         bio_put(bio);
7993 }
7994
7995 static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
7996                                     struct bio *bio, int mirror_num,
7997                                     unsigned long bio_flags, u64 offset)
7998 {
7999         int ret;
8000         struct btrfs_root *root = BTRFS_I(inode)->root;
8001         ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
8002         BUG_ON(ret); /* -ENOMEM */
8003         return 0;
8004 }
8005
8006 static void btrfs_end_dio_bio(struct bio *bio)
8007 {
8008         struct btrfs_dio_private *dip = bio->bi_private;
8009         int err = bio->bi_error;
8010
8011         if (err)
8012                 btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
8013                            "direct IO failed ino %llu rw %lu sector %#Lx len %u err no %d",
8014                            btrfs_ino(dip->inode), bio->bi_rw,
8015                            (unsigned long long)bio->bi_iter.bi_sector,
8016                            bio->bi_iter.bi_size, err);
8017
8018         if (dip->subio_endio)
8019                 err = dip->subio_endio(dip->inode, btrfs_io_bio(bio), err);
8020
8021         if (err) {
8022                 dip->errors = 1;
8023
8024                 /*
8025                  * before the atomic variable goes to zero, we must make sure
8026                  * dip->errors is perceived to be set.
8027                  */
8028                 smp_mb__before_atomic();
8029         }
8030
8031         /* if there are more bios still pending for this dio, just exit */
8032         if (!atomic_dec_and_test(&dip->pending_bios))
8033                 goto out;
8034
8035         if (dip->errors) {
8036                 bio_io_error(dip->orig_bio);
8037         } else {
8038                 dip->dio_bio->bi_error = 0;
8039                 bio_endio(dip->orig_bio);
8040         }
8041 out:
8042         bio_put(bio);
8043 }
8044
8045 static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
8046                                        u64 first_sector, gfp_t gfp_flags)
8047 {
8048         struct bio *bio;
8049         bio = btrfs_bio_alloc(bdev, first_sector, BIO_MAX_PAGES, gfp_flags);
8050         if (bio)
8051                 bio_associate_current(bio);
8052         return bio;
8053 }
8054
8055 static inline int btrfs_lookup_and_bind_dio_csum(struct btrfs_root *root,
8056                                                  struct inode *inode,
8057                                                  struct btrfs_dio_private *dip,
8058                                                  struct bio *bio,
8059                                                  u64 file_offset)
8060 {
8061         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
8062         struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio);
8063         int ret;
8064
8065         /*
8066          * We load all the csum data we need when we submit
8067          * the first bio to reduce the csum tree search and
8068          * contention.
8069          */
8070         if (dip->logical_offset == file_offset) {
8071                 ret = btrfs_lookup_bio_sums_dio(root, inode, dip->orig_bio,
8072                                                 file_offset);
8073                 if (ret)
8074                         return ret;
8075         }
8076
8077         if (bio == dip->orig_bio)
8078                 return 0;
8079
8080         file_offset -= dip->logical_offset;
8081         file_offset >>= inode->i_sb->s_blocksize_bits;
8082         io_bio->csum = (u8 *)(((u32 *)orig_io_bio->csum) + file_offset);
8083
8084         return 0;
8085 }
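/*
 * Editorial example of the pointer math above: with 4K blocks
 * (s_blocksize_bits == 12) and 4-byte crc32c csums, a split bio that
 * starts 8K into the dio gets file_offset >>= 12 == 2, so io_bio->csum
 * points two u32 entries into the csum array that was loaded once for
 * the whole original bio.
 */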
8086
8087 static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
8088                                          int rw, u64 file_offset, int skip_sum,
8089                                          int async_submit)
8090 {
8091         struct btrfs_dio_private *dip = bio->bi_private;
8092         int write = rw & REQ_WRITE;
8093         struct btrfs_root *root = BTRFS_I(inode)->root;
8094         int ret;
8095
8096         if (async_submit)
8097                 async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
8098
8099         bio_get(bio);
8100
8101         if (!write) {
8102                 ret = btrfs_bio_wq_end_io(root->fs_info, bio,
8103                                 BTRFS_WQ_ENDIO_DATA);
8104                 if (ret)
8105                         goto err;
8106         }
8107
8108         if (skip_sum)
8109                 goto map;
8110
8111         if (write && async_submit) {
8112                 ret = btrfs_wq_submit_bio(root->fs_info,
8113                                    inode, rw, bio, 0, 0,
8114                                    file_offset,
8115                                    __btrfs_submit_bio_start_direct_io,
8116                                    __btrfs_submit_bio_done);
8117                 goto err;
8118         } else if (write) {
8119                 /*
8120                  * If we aren't doing async submit, calculate the csum of the
8121                  * bio now.
8122                  */
8123                 ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
8124                 if (ret)
8125                         goto err;
8126         } else {
8127                 ret = btrfs_lookup_and_bind_dio_csum(root, inode, dip, bio,
8128                                                      file_offset);
8129                 if (ret)
8130                         goto err;
8131         }
8132 map:
8133         ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
8134 err:
8135         bio_put(bio);
8136         return ret;
8137 }
8138
8139 static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
8140                                     int skip_sum)
8141 {
8142         struct inode *inode = dip->inode;
8143         struct btrfs_root *root = BTRFS_I(inode)->root;
8144         struct bio *bio;
8145         struct bio *orig_bio = dip->orig_bio;
8146         struct bio_vec *bvec = orig_bio->bi_io_vec;
8147         u64 start_sector = orig_bio->bi_iter.bi_sector;
8148         u64 file_offset = dip->logical_offset;
8149         u64 submit_len = 0;
8150         u64 map_length;
8151         int nr_pages = 0;
8152         int ret;
8153         int async_submit = 0;
8154
8155         map_length = orig_bio->bi_iter.bi_size;
8156         ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
8157                               &map_length, NULL, 0);
8158         if (ret)
8159                 return -EIO;
8160
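        /*
         * If the whole bio fits within a single stripe mapping, submit
         * the original bio directly with no splitting.
         */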
8161         if (map_length >= orig_bio->bi_iter.bi_size) {
8162                 bio = orig_bio;
8163                 dip->flags |= BTRFS_DIO_ORIG_BIO_SUBMITTED;
8164                 goto submit;
8165         }
8166
8167         /* async crcs make it difficult to collect full stripe writes. */
8168         if (btrfs_get_alloc_profile(root, 1) & BTRFS_BLOCK_GROUP_RAID56_MASK)
8169                 async_submit = 0;
8170         else
8171                 async_submit = 1;
8172
8173         bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
8174         if (!bio)
8175                 return -ENOMEM;
8176
8177         bio->bi_private = dip;
8178         bio->bi_end_io = btrfs_end_dio_bio;
8179         btrfs_io_bio(bio)->logical = file_offset;
8180         atomic_inc(&dip->pending_bios);
8181
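        /*
         * Walk the original bio's pages, packing them into child bios.
         * Once adding another page would cross the current stripe
         * boundary (map_length), submit what we have, allocate a new
         * bio, and re-map from the new start sector.
         */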
8182         while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
8183                 if (map_length < submit_len + bvec->bv_len ||
8184                     bio_add_page(bio, bvec->bv_page, bvec->bv_len,
8185                                  bvec->bv_offset) < bvec->bv_len) {
8186                         /*
8187                          * Increment the count before we submit the bio so
8188                          * the end IO handler can't run (and drop the last
8189                          * reference) before we're done setting things up;
8190                          * otherwise the dip might get freed under us.
8191                          */
8192                         atomic_inc(&dip->pending_bios);
8193                         ret = __btrfs_submit_dio_bio(bio, inode, rw,
8194                                                      file_offset, skip_sum,
8195                                                      async_submit);
8196                         if (ret) {
8197                                 bio_put(bio);
8198                                 atomic_dec(&dip->pending_bios);
8199                                 goto out_err;
8200                         }
8201
8202                         start_sector += submit_len >> 9;
8203                         file_offset += submit_len;
8204
8205                         submit_len = 0;
8206                         nr_pages = 0;
8207
8208                         bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
8209                                                   start_sector, GFP_NOFS);
8210                         if (!bio)
8211                                 goto out_err;
8212                         bio->bi_private = dip;
8213                         bio->bi_end_io = btrfs_end_dio_bio;
8214                         btrfs_io_bio(bio)->logical = file_offset;
8215
8216                         map_length = orig_bio->bi_iter.bi_size;
8217                         ret = btrfs_map_block(root->fs_info, rw,
8218                                               start_sector << 9,
8219                                               &map_length, NULL, 0);
8220                         if (ret) {
8221                                 bio_put(bio);
8222                                 goto out_err;
8223                         }
8224                 } else {
8225                         submit_len += bvec->bv_len;
8226                         nr_pages++;
8227                         bvec++;
8228                 }
8229         }
8230
8231 submit:
8232         ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
8233                                      async_submit);
8234         if (!ret)
8235                 return 0;
8236
8237         bio_put(bio);
8238 out_err:
8239         dip->errors = 1;
8240         /*
8241          * Before the atomic variable goes to zero, we must
8242          * make sure dip->errors is perceived to be set.
8243          */
8244         smp_mb__before_atomic();
8245         if (atomic_dec_and_test(&dip->pending_bios))
8246                 bio_io_error(dip->orig_bio);
8247
8248         /* the endio callbacks will handle the error, so we needn't return it */
8249         return 0;
8250 }
8251
8252 static void btrfs_submit_direct(int rw, struct bio *dio_bio,
8253                                 struct inode *inode, loff_t file_offset)
8254 {
8255         struct btrfs_dio_private *dip = NULL;
8256         struct bio *io_bio = NULL;
8257         struct btrfs_io_bio *btrfs_bio;
8258         int skip_sum;
8259         int write = rw & REQ_WRITE;
8260         int ret = 0;
8261
8262         skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
8263
8264         io_bio = btrfs_bio_clone(dio_bio, GFP_NOFS);
8265         if (!io_bio) {
8266                 ret = -ENOMEM;
8267                 goto free_ordered;
8268         }
8269
8270         dip = kzalloc(sizeof(*dip), GFP_NOFS);
8271         if (!dip) {
8272                 ret = -ENOMEM;
8273                 goto free_ordered;
8274         }
8275
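        /*
         * The dip tracks one direct IO: dio_bio is the bio handed in by
         * the generic dio code, io_bio is our private clone that we
         * actually drive.  When the clone (and any sub-bios) complete,
         * the endio callbacks finish dio_bio as well.
         */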
8276         dip->private = dio_bio->bi_private;
8277         dip->inode = inode;
8278         dip->logical_offset = file_offset;
8279         dip->bytes = dio_bio->bi_iter.bi_size;
8280         dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9;
8281         io_bio->bi_private = dip;
8282         dip->orig_bio = io_bio;
8283         dip->dio_bio = dio_bio;
8284         atomic_set(&dip->pending_bios, 0);
8285         btrfs_bio = btrfs_io_bio(io_bio);
8286         btrfs_bio->logical = file_offset;
8287
8288         if (write) {
8289                 io_bio->bi_end_io = btrfs_endio_direct_write;
8290         } else {
8291                 io_bio->bi_end_io = btrfs_endio_direct_read;
8292                 dip->subio_endio = btrfs_subio_endio_read;
8293         }
8294
8295         ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
8296         if (!ret)
8297                 return;
8298
8299         if (btrfs_bio->end_io)
8300                 btrfs_bio->end_io(btrfs_bio, ret);
8301
8302 free_ordered:
8303         /*
8304          * If we arrived here it means we either failed to submit the dip,
8305          * failed to clone the dio_bio, or failed to allocate the
8306          * dip. If we cloned the dio_bio and allocated the dip, we can just
8307          * call bio_endio against our io_bio so that we get proper resource
8308          * cleanup if we fail to submit the dip, otherwise, we must do the
8309          * same as btrfs_endio_direct_[write|read] because we can't call these
8310          * callbacks - they require an allocated dip and a clone of dio_bio.
8311          */
8312         if (io_bio && dip) {
8313                 io_bio->bi_error = -EIO;
8314                 bio_endio(io_bio);
8315                 /*
8316                  * The end io callbacks free our dip, do the final put on io_bio
8317                  * and all the cleanup and final put for dio_bio (through
8318                  * dio_end_io()).
8319                  */
8320                 dip = NULL;
8321                 io_bio = NULL;
8322         } else {
8323                 if (write) {
8324                         struct btrfs_ordered_extent *ordered;
8325
8326                         ordered = btrfs_lookup_ordered_extent(inode,
8327                                                               file_offset);
8328                         set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
8329                         /*
8330                          * Decrements our ref on the ordered extent and removes
8331                          * the ordered extent from the inode's ordered tree,
8332                          * doing all the proper resource cleanup such as for the
8333                          * reserved space and waking up any waiters for this
8334                          * ordered extent (through btrfs_remove_ordered_extent).
8335                          */
8336                         btrfs_finish_ordered_io(ordered);
8337                 } else {
8338                         unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
8339                               file_offset + dio_bio->bi_iter.bi_size - 1);
8340                 }
8341                 dio_bio->bi_error = -EIO;
8342                 /*
8343                  * Releases and cleans up our dio_bio, no need to bio_put()
8344                  * nor bio_endio()/bio_io_error() against dio_bio.
8345                  */
8346                 dio_end_io(dio_bio, ret);
8347         }
8348         if (io_bio)
8349                 bio_put(io_bio);
8350         kfree(dip);
8351 }
8352
8353 static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb,
8354                         const struct iov_iter *iter, loff_t offset)
8355 {
8356         int seg;
8357         int i;
8358         unsigned blocksize_mask = root->sectorsize - 1;
8359         ssize_t retval = -EINVAL;
8360
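        /*
         * Both the file offset and the iovec bases/lengths must be
         * sector aligned; sectorsize is a power of two, so masking
         * with (sectorsize - 1) exposes any misaligned low bits.
         */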
8361         if (offset & blocksize_mask)
8362                 goto out;
8363
8364         if (iov_iter_alignment(iter) & blocksize_mask)
8365                 goto out;
8366
8367         /* If this is a write we don't need any further checks */
8368         if (iov_iter_rw(iter) == WRITE)
8369                 return 0;
8370         /*
8371          * Check to make sure we don't have duplicate iov_base's in this
8372          * iovec, if so return EINVAL, otherwise we'll get csum errors
8373          * when reading back.
8374          */
8375         for (seg = 0; seg < iter->nr_segs; seg++) {
8376                 for (i = seg + 1; i < iter->nr_segs; i++) {
8377                         if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
8378                                 goto out;
8379                 }
8380         }
8381         retval = 0;
8382 out:
8383         return retval;
8384 }
8385
8386 static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
8387                                loff_t offset)
8388 {
8389         struct file *file = iocb->ki_filp;
8390         struct inode *inode = file->f_mapping->host;
8391         struct btrfs_root *root = BTRFS_I(inode)->root;
8392         struct btrfs_dio_data dio_data = { 0 };
8393         size_t count = 0;
8394         int flags = 0;
8395         bool wakeup = true;
8396         bool relock = false;
8397         ssize_t ret;
8398
8399         if (check_direct_IO(BTRFS_I(inode)->root, iocb, iter, offset))
8400                 return 0;
8401
8402         inode_dio_begin(inode);
8403         smp_mb__after_atomic();
8404
8405         /*
8406          * The generic stuff only does filemap_write_and_wait_range, which
8407          * isn't enough if we've written compressed pages to this area, so
8408          * we need to flush the dirty pages again to make absolutely sure
8409          * that any outstanding dirty pages are on disk.
8410          */
8411         count = iov_iter_count(iter);
8412         if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
8413                      &BTRFS_I(inode)->runtime_flags))
8414                 filemap_fdatawrite_range(inode->i_mapping, offset,
8415                                          offset + count - 1);
8416
8417         if (iov_iter_rw(iter) == WRITE) {
8418                 /*
8419                  * If the DIO write extends beyond EOF we need to update
8420                  * the isize, which is protected by i_mutex, so we cannot
8421                  * unlock i_mutex in this case.
8422                  */
8423                 if (offset + count <= inode->i_size) {
8424                         mutex_unlock(&inode->i_mutex);
8425                         relock = true;
8426                 }
8427                 ret = btrfs_delalloc_reserve_space(inode, count);
8428                 if (ret)
8429                         goto out;
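                /*
                 * Worst case the range is split into extents of at most
                 * BTRFS_MAX_EXTENT_SIZE each; e.g. assuming the usual
                 * 128M max extent size, a 384M DIO write reserves 3
                 * outstanding extents.
                 */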
8430                 dio_data.outstanding_extents = div64_u64(count +
8431                                                 BTRFS_MAX_EXTENT_SIZE - 1,
8432                                                 BTRFS_MAX_EXTENT_SIZE);
8433
8434                 /*
8435                  * We need to know how many extents we reserved so that we can
8436                  * do the accounting properly if we go over the number we
8437                  * originally calculated.  Abuse current->journal_info for this.
8438                  */
8439                 dio_data.reserve = round_up(count, root->sectorsize);
8440                 current->journal_info = &dio_data;
8441         } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
8442                                      &BTRFS_I(inode)->runtime_flags)) {
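                /*
                 * Note: this flag is set while the inode is being
                 * truncated down; fall back to locked DIO so the read
                 * can't race with the truncate.
                 */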
8443                 inode_dio_end(inode);
8444                 flags = DIO_LOCKING | DIO_SKIP_HOLES;
8445                 wakeup = false;
8446         }
8447
8448         ret = __blockdev_direct_IO(iocb, inode,
8449                                    BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
8450                                    iter, offset, btrfs_get_blocks_direct, NULL,
8451                                    btrfs_submit_direct, flags);
8452         if (iov_iter_rw(iter) == WRITE) {
8453                 current->journal_info = NULL;
8454                 if (ret < 0 && ret != -EIOCBQUEUED) {
8455                         if (dio_data.reserve)
8456                                 btrfs_delalloc_release_space(inode,
8457                                                         dio_data.reserve);
8458                 } else if (ret >= 0 && (size_t)ret < count)
8459                         btrfs_delalloc_release_space(inode,
8460                                                      count - (size_t)ret);
8461         }
8462 out:
8463         if (wakeup)
8464                 inode_dio_end(inode);
8465         if (relock)
8466                 mutex_lock(&inode->i_mutex);
8467
8468         return ret;
8469 }
8470
8471 #define BTRFS_FIEMAP_FLAGS      (FIEMAP_FLAG_SYNC)
8472
8473 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
8474                 __u64 start, __u64 len)
8475 {
8476         int     ret;
8477
8478         ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS);
8479         if (ret)
8480                 return ret;
8481
8482         return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
8483 }
8484
8485 int btrfs_readpage(struct file *file, struct page *page)
8486 {
8487         struct extent_io_tree *tree;
8488         tree = &BTRFS_I(page->mapping->host)->io_tree;
8489         return extent_read_full_page(tree, page, btrfs_get_extent, 0);
8490 }
8491
8492 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
8493 {
8494         struct extent_io_tree *tree;
8495
8496
8497         if (current->flags & PF_MEMALLOC) {
8498                 redirty_page_for_writepage(wbc, page);
8499                 unlock_page(page);
8500                 return 0;
8501         }
8502         tree = &BTRFS_I(page->mapping->host)->io_tree;
8503         return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
8504 }
8505
8506 static int btrfs_writepages(struct address_space *mapping,
8507                             struct writeback_control *wbc)
8508 {
8509         struct extent_io_tree *tree;
8510
8511         tree = &BTRFS_I(mapping->host)->io_tree;
8512         return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
8513 }
8514
8515 static int
8516 btrfs_readpages(struct file *file, struct address_space *mapping,
8517                 struct list_head *pages, unsigned nr_pages)
8518 {
8519         struct extent_io_tree *tree;
8520         tree = &BTRFS_I(mapping->host)->io_tree;
8521         return extent_readpages(tree, mapping, pages, nr_pages,
8522                                 btrfs_get_extent);
8523 }
8524 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8525 {
8526         struct extent_io_tree *tree;
8527         struct extent_map_tree *map;
8528         int ret;
8529
8530         tree = &BTRFS_I(page->mapping->host)->io_tree;
8531         map = &BTRFS_I(page->mapping->host)->extent_tree;
8532         ret = try_release_extent_mapping(map, tree, page, gfp_flags);
8533         if (ret == 1) {
8534                 ClearPagePrivate(page);
8535                 set_page_private(page, 0);
8536                 page_cache_release(page);
8537         }
8538         return ret;
8539 }
8540
8541 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8542 {
8543         if (PageWriteback(page) || PageDirty(page))
8544                 return 0;
8545         return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
8546 }
8547
8548 static void btrfs_invalidatepage(struct page *page, unsigned int offset,
8549                                  unsigned int length)
8550 {
8551         struct inode *inode = page->mapping->host;
8552         struct extent_io_tree *tree;
8553         struct btrfs_ordered_extent *ordered;
8554         struct extent_state *cached_state = NULL;
8555         u64 page_start = page_offset(page);
8556         u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
8557         int inode_evicting = inode->i_state & I_FREEING;
8558
8559         /*
8560          * we have the page locked, so new writeback can't start,
8561          * and the dirty bit won't be cleared while we are here.
8562          *
8563          * Wait for IO on this page so that we can safely clear
8564          * the PagePrivate2 bit and do ordered accounting
8565          */
8566         wait_on_page_writeback(page);
8567
8568         tree = &BTRFS_I(inode)->io_tree;
8569         if (offset) {
8570                 btrfs_releasepage(page, GFP_NOFS);
8571                 return;
8572         }
8573
8574         if (!inode_evicting)
8575                 lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
8576         ordered = btrfs_lookup_ordered_extent(inode, page_start);
8577         if (ordered) {
8578                 /*
8579                  * IO on this page will never be started, so we need
8580                  * to account for any ordered extents now
8581                  */
8582                 if (!inode_evicting)
8583                         clear_extent_bit(tree, page_start, page_end,
8584                                          EXTENT_DIRTY | EXTENT_DELALLOC |
8585                                          EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
8586                                          EXTENT_DEFRAG, 1, 0, &cached_state,
8587                                          GFP_NOFS);
8588                 /*
8589                  * whoever cleared the private bit is responsible
8590                  * for the finish_ordered_io
8591                  */
8592                 if (TestClearPagePrivate2(page)) {
8593                         struct btrfs_ordered_inode_tree *tree;
8594                         u64 new_len;
8595
8596                         tree = &BTRFS_I(inode)->ordered_tree;
8597
8598                         spin_lock_irq(&tree->lock);
8599                         set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
8600                         new_len = page_start - ordered->file_offset;
8601                         if (new_len < ordered->truncated_len)
8602                                 ordered->truncated_len = new_len;
8603                         spin_unlock_irq(&tree->lock);
8604
8605                         if (btrfs_dec_test_ordered_pending(inode, &ordered,
8606                                                            page_start,
8607                                                            PAGE_CACHE_SIZE, 1))
8608                                 btrfs_finish_ordered_io(ordered);
8609                 }
8610                 btrfs_put_ordered_extent(ordered);
8611                 if (!inode_evicting) {
8612                         cached_state = NULL;
8613                         lock_extent_bits(tree, page_start, page_end, 0,
8614                                          &cached_state);
8615                 }
8616         }
8617
8618         if (!inode_evicting) {
8619                 clear_extent_bit(tree, page_start, page_end,
8620                                  EXTENT_LOCKED | EXTENT_DIRTY |
8621                                  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
8622                                  EXTENT_DEFRAG, 1, 1,
8623                                  &cached_state, GFP_NOFS);
8624
8625                 __btrfs_releasepage(page, GFP_NOFS);
8626         }
8627
8628         ClearPageChecked(page);
8629         if (PagePrivate(page)) {
8630                 ClearPagePrivate(page);
8631                 set_page_private(page, 0);
8632                 page_cache_release(page);
8633         }
8634 }
8635
8636 /*
8637  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
8638  * called from a page fault handler when a page is first dirtied. Hence we must
8639  * be careful to check for EOF conditions here. We set the page up correctly
8640  * for a written page which means we get ENOSPC checking when writing into
8641  * holes and correct delalloc and unwritten extent mapping on filesystems that
8642  * support these features.
8643  *
8644  * We are not allowed to take the i_mutex here so we have to play games to
8645  * protect against truncate races as the page could now be beyond EOF.  Because
8646  * vmtruncate() writes the inode size before removing pages, once we have the
8647  * page lock we can determine safely if the page is beyond EOF. If it is not
8648  * beyond EOF, then the page is guaranteed safe against truncation until we
8649  * unlock the page.
8650  */
8651 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
8652 {
8653         struct page *page = vmf->page;
8654         struct inode *inode = file_inode(vma->vm_file);
8655         struct btrfs_root *root = BTRFS_I(inode)->root;
8656         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
8657         struct btrfs_ordered_extent *ordered;
8658         struct extent_state *cached_state = NULL;
8659         char *kaddr;
8660         unsigned long zero_start;
8661         loff_t size;
8662         int ret;
8663         int reserved = 0;
8664         u64 page_start;
8665         u64 page_end;
8666
8667         sb_start_pagefault(inode->i_sb);
8668         ret  = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
8669         if (!ret) {
8670                 ret = file_update_time(vma->vm_file);
8671                 reserved = 1;
8672         }
8673         if (ret) {
8674                 if (ret == -ENOMEM)
8675                         ret = VM_FAULT_OOM;
8676                 else /* -ENOSPC, -EIO, etc */
8677                         ret = VM_FAULT_SIGBUS;
8678                 if (reserved)
8679                         goto out;
8680                 goto out_noreserve;
8681         }
8682
8683         ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
8684 again:
8685         lock_page(page);
8686         size = i_size_read(inode);
8687         page_start = page_offset(page);
8688         page_end = page_start + PAGE_CACHE_SIZE - 1;
8689
8690         if ((page->mapping != inode->i_mapping) ||
8691             (page_start >= size)) {
8692                 /* page got truncated out from underneath us */
8693                 goto out_unlock;
8694         }
8695         wait_on_page_writeback(page);
8696
8697         lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
8698         set_page_extent_mapped(page);
8699
8700         /*
8701          * we can't set the delalloc bits if there are pending ordered
8702          * extents.  Drop our locks and wait for them to finish
8703          */
8704         ordered = btrfs_lookup_ordered_extent(inode, page_start);
8705         if (ordered) {
8706                 unlock_extent_cached(io_tree, page_start, page_end,
8707                                      &cached_state, GFP_NOFS);
8708                 unlock_page(page);
8709                 btrfs_start_ordered_extent(inode, ordered, 1);
8710                 btrfs_put_ordered_extent(ordered);
8711                 goto again;
8712         }
8713
8714         /*
8715          * XXX - page_mkwrite gets called every time the page is dirtied, even
8716          * if it was already dirty, so for space accounting reasons we need to
8717          * clear any delalloc bits for the range we are about to write.  There
8718          * is probably a better way to do this, but for now keep consistent with
8719          * prepare_pages in the normal write path.
8720          */
8721         clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
8722                           EXTENT_DIRTY | EXTENT_DELALLOC |
8723                           EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
8724                           0, 0, &cached_state, GFP_NOFS);
8725
8726         ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
8727                                         &cached_state);
8728         if (ret) {
8729                 unlock_extent_cached(io_tree, page_start, page_end,
8730                                      &cached_state, GFP_NOFS);
8731                 ret = VM_FAULT_SIGBUS;
8732                 goto out_unlock;
8733         }
8734         ret = 0;
8735
8736         /* page is wholly or partially inside EOF */
8737         if (page_start + PAGE_CACHE_SIZE > size)
8738                 zero_start = size & ~PAGE_CACHE_MASK;
8739         else
8740                 zero_start = PAGE_CACHE_SIZE;
8741
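        /*
         * If EOF falls inside this page, zero the tail beyond EOF so
         * stale data can't become visible through the mapping.
         */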
8742         if (zero_start != PAGE_CACHE_SIZE) {
8743                 kaddr = kmap(page);
8744                 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
8745                 flush_dcache_page(page);
8746                 kunmap(page);
8747         }
8748         ClearPageChecked(page);
8749         set_page_dirty(page);
8750         SetPageUptodate(page);
8751
8752         BTRFS_I(inode)->last_trans = root->fs_info->generation;
8753         BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
8754         BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
8755
8756         unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
8757
8758 out_unlock:
8759         if (!ret) {
8760                 sb_end_pagefault(inode->i_sb);
8761                 return VM_FAULT_LOCKED;
8762         }
8763         unlock_page(page);
8764 out:
8765         btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
8766 out_noreserve:
8767         sb_end_pagefault(inode->i_sb);
8768         return ret;
8769 }
8770
8771 static int btrfs_truncate(struct inode *inode)
8772 {
8773         struct btrfs_root *root = BTRFS_I(inode)->root;
8774         struct btrfs_block_rsv *rsv;
8775         int ret = 0;
8776         int err = 0;
8777         struct btrfs_trans_handle *trans;
8778         u64 mask = root->sectorsize - 1;
8779         u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
8780
8781         ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
8782                                        (u64)-1);
8783         if (ret)
8784                 return ret;
8785
8786         /*
8787          * Yes, ladies and gentlemen, this is indeed ugly.  The fact is we have
8788          * 3 things going on here:
8789          *
8790          * 1) We need to reserve space for our orphan item and the space to
8791          * delete our orphan item.  Lord knows we don't want to have a dangling
8792          * orphan item because we didn't reserve space to remove it.
8793          *
8794          * 2) We need to reserve space to update our inode.
8795          *
8796          * 3) We need to have something to cache all the space that is going to
8797          * be freed up by the truncate operation, but also have some slack
8798          * space reserved in case it uses space during the truncate (thank you
8799          * very much snapshotting).
8800          *
8801          * And we need these to all be separate.  The fact is we can use a lot
8802          * of space doing the truncate, and we have no earthly idea how much
8803          * space we will use, so we need the truncate reservation to be separate
8804          * so it doesn't end up using space reserved for updating the inode or
8805          * removing the orphan item.  We also need to be able to stop the
8806          * transaction and start a new one, which means we need to be able to
8807          * update the inode several times, and we have no way of knowing how
8808          * many times that will be, so we can't just reserve 1 item for the
8809          * entirety of the operation, so that has to be done separately as well.
8810          * Then there is the orphan item, which does indeed need to be held on
8811          * to for the whole operation, and we need nobody to touch this reserved
8812          * space except the orphan code.
8813          *
8814          * So that leaves us with
8815          *
8816          * 1) root->orphan_block_rsv - for the orphan deletion.
8817          * 2) rsv - for the truncate reservation, which we will steal from the
8818          * transaction reservation.
8819          * 3) fs_info->trans_block_rsv - this will have 1 item's worth left for
8820          * updating the inode.
8821          */
8822         rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
8823         if (!rsv)
8824                 return -ENOMEM;
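        /*
         * Note: failfast makes reservations from this rsv fail quickly
         * instead of triggering flushing, so the truncate loop below
         * gets -ENOSPC/-EAGAIN back, ends the transaction, refills the
         * rsv, and continues.
         */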
8825         rsv->size = min_size;
8826         rsv->failfast = 1;
8827
8828         /*
8829          * 1 for the truncate slack space
8830          * 1 for updating the inode.
8831          */
8832         trans = btrfs_start_transaction(root, 2);
8833         if (IS_ERR(trans)) {
8834                 err = PTR_ERR(trans);
8835                 goto out;
8836         }
8837
8838         /* Migrate the slack space for the truncate to our reserve */
8839         ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
8840                                       min_size);
8841         BUG_ON(ret);
8842
8843         /*
8844          * So if we truncate and then write and fsync we normally would just
8845          * write the extents that changed, which is a problem if we need to
8846          * first truncate that entire inode.  So set this flag so we write out
8847          * all of the extents in the inode to the sync log so we're completely
8848          * safe.
8849          */
8850         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
8851         trans->block_rsv = rsv;
8852
8853         while (1) {
8854                 ret = btrfs_truncate_inode_items(trans, root, inode,
8855                                                  inode->i_size,
8856                                                  BTRFS_EXTENT_DATA_KEY);
8857                 if (ret != -ENOSPC && ret != -EAGAIN) {
8858                         err = ret;
8859                         break;
8860                 }
8861
8862                 trans->block_rsv = &root->fs_info->trans_block_rsv;
8863                 ret = btrfs_update_inode(trans, root, inode);
8864                 if (ret) {
8865                         err = ret;
8866                         break;
8867                 }
8868
8869                 btrfs_end_transaction(trans, root);
8870                 btrfs_btree_balance_dirty(root);
8871
8872                 trans = btrfs_start_transaction(root, 2);
8873                 if (IS_ERR(trans)) {
8874                         ret = err = PTR_ERR(trans);
8875                         trans = NULL;
8876                         break;
8877                 }
8878
8879                 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
8880                                               rsv, min_size);
8881                 BUG_ON(ret);    /* shouldn't happen */
8882                 trans->block_rsv = rsv;
8883         }
8884
8885         if (ret == 0 && inode->i_nlink > 0) {
8886                 trans->block_rsv = root->orphan_block_rsv;
8887                 ret = btrfs_orphan_del(trans, inode);
8888                 if (ret)
8889                         err = ret;
8890         }
8891
8892         if (trans) {
8893                 trans->block_rsv = &root->fs_info->trans_block_rsv;
8894                 ret = btrfs_update_inode(trans, root, inode);
8895                 if (ret && !err)
8896                         err = ret;
8897
8898                 ret = btrfs_end_transaction(trans, root);
8899                 btrfs_btree_balance_dirty(root);
8900         }
8901
8902 out:
8903         btrfs_free_block_rsv(root, rsv);
8904
8905         if (ret && !err)
8906                 err = ret;
8907
8908         return err;
8909 }
8910
8911 /*
8912  * create a new subvolume directory/inode (helper for the ioctl).
8913  */
8914 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
8915                              struct btrfs_root *new_root,
8916                              struct btrfs_root *parent_root,
8917                              u64 new_dirid)
8918 {
8919         struct inode *inode;
8920         int err;
8921         u64 index = 0;
8922
8923         inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
8924                                 new_dirid, new_dirid,
8925                                 S_IFDIR | (~current_umask() & S_IRWXUGO),
8926                                 &index);
8927         if (IS_ERR(inode))
8928                 return PTR_ERR(inode);
8929         inode->i_op = &btrfs_dir_inode_operations;
8930         inode->i_fop = &btrfs_dir_file_operations;
8931
8932         set_nlink(inode, 1);
8933         btrfs_i_size_write(inode, 0);
8934         unlock_new_inode(inode);
8935
8936         err = btrfs_subvol_inherit_props(trans, new_root, parent_root);
8937         if (err)
8938                 btrfs_err(new_root->fs_info,
8939                           "error inheriting subvolume %llu properties: %d",
8940                           new_root->root_key.objectid, err);
8941
8942         err = btrfs_update_inode(trans, new_root, inode);
8943
8944         iput(inode);
8945         return err;
8946 }
8947
8948 struct inode *btrfs_alloc_inode(struct super_block *sb)
8949 {
8950         struct btrfs_inode *ei;
8951         struct inode *inode;
8952
8953         ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
8954         if (!ei)
8955                 return NULL;
8956
8957         ei->root = NULL;
8958         ei->generation = 0;
8959         ei->last_trans = 0;
8960         ei->last_sub_trans = 0;
8961         ei->logged_trans = 0;
8962         ei->delalloc_bytes = 0;
8963         ei->defrag_bytes = 0;
8964         ei->disk_i_size = 0;
8965         ei->flags = 0;
8966         ei->csum_bytes = 0;
8967         ei->index_cnt = (u64)-1;
8968         ei->dir_index = 0;
8969         ei->last_unlink_trans = 0;
8970         ei->last_log_commit = 0;
8971
8972         spin_lock_init(&ei->lock);
8973         ei->outstanding_extents = 0;
8974         ei->reserved_extents = 0;
8975
8976         ei->runtime_flags = 0;
8977         ei->force_compress = BTRFS_COMPRESS_NONE;
8978
8979         ei->delayed_node = NULL;
8980
8981         ei->i_otime.tv_sec = 0;
8982         ei->i_otime.tv_nsec = 0;
8983
8984         inode = &ei->vfs_inode;
8985         extent_map_tree_init(&ei->extent_tree);
8986         extent_io_tree_init(&ei->io_tree, &inode->i_data);
8987         extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
8988         ei->io_tree.track_uptodate = 1;
8989         ei->io_failure_tree.track_uptodate = 1;
8990         atomic_set(&ei->sync_writers, 0);
8991         mutex_init(&ei->log_mutex);
8992         mutex_init(&ei->delalloc_mutex);
8993         btrfs_ordered_inode_tree_init(&ei->ordered_tree);
8994         INIT_LIST_HEAD(&ei->delalloc_inodes);
8995         RB_CLEAR_NODE(&ei->rb_node);
8996
8997         return inode;
8998 }
8999
9000 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
9001 void btrfs_test_destroy_inode(struct inode *inode)
9002 {
9003         btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
9004         kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
9005 }
9006 #endif
9007
9008 static void btrfs_i_callback(struct rcu_head *head)
9009 {
9010         struct inode *inode = container_of(head, struct inode, i_rcu);
9011         kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
9012 }
9013
9014 void btrfs_destroy_inode(struct inode *inode)
9015 {
9016         struct btrfs_ordered_extent *ordered;
9017         struct btrfs_root *root = BTRFS_I(inode)->root;
9018
9019         WARN_ON(!hlist_empty(&inode->i_dentry));
9020         WARN_ON(inode->i_data.nrpages);
9021         WARN_ON(BTRFS_I(inode)->outstanding_extents);
9022         WARN_ON(BTRFS_I(inode)->reserved_extents);
9023         WARN_ON(BTRFS_I(inode)->delalloc_bytes);
9024         WARN_ON(BTRFS_I(inode)->csum_bytes);
9025         WARN_ON(BTRFS_I(inode)->defrag_bytes);
9026
9027         /*
9028          * This can happen when we create an inode, but somebody else also
9029          * created the same inode and we need to destroy the one we already
9030          * created.
9031          */
9032         if (!root)
9033                 goto free;
9034
9035         if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
9036                      &BTRFS_I(inode)->runtime_flags)) {
9037                 btrfs_info(root->fs_info, "inode %llu still on the orphan list",
9038                         btrfs_ino(inode));
9039                 atomic_dec(&root->orphan_inodes);
9040         }
9041
9042         while (1) {
9043                 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
9044                 if (!ordered)
9045                         break;
9046                 else {
9047                         btrfs_err(root->fs_info, "found ordered extent %llu %llu on inode cleanup",
9048                                 ordered->file_offset, ordered->len);
9049                         btrfs_remove_ordered_extent(inode, ordered);
9050                         btrfs_put_ordered_extent(ordered);
9051                         btrfs_put_ordered_extent(ordered);
9052                 }
9053         }
9054         inode_tree_del(inode);
9055         btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
9056 free:
9057         call_rcu(&inode->i_rcu, btrfs_i_callback);
9058 }
9059
9060 int btrfs_drop_inode(struct inode *inode)
9061 {
9062         struct btrfs_root *root = BTRFS_I(inode)->root;
9063
9064         if (root == NULL)
9065                 return 1;
9066
9067         /* the snap/subvol tree is being deleted */
9068         if (btrfs_root_refs(&root->root_item) == 0)
9069                 return 1;
9070         else
9071                 return generic_drop_inode(inode);
9072 }
9073
9074 static void init_once(void *foo)
9075 {
9076         struct btrfs_inode *ei = (struct btrfs_inode *) foo;
9077
9078         inode_init_once(&ei->vfs_inode);
9079 }
9080
9081 void btrfs_destroy_cachep(void)
9082 {
9083         /*
9084          * Make sure all delayed rcu-freed inodes are flushed before we
9085          * destroy the cache.
9086          */
9087         rcu_barrier();
9088         if (btrfs_inode_cachep)
9089                 kmem_cache_destroy(btrfs_inode_cachep);
9090         if (btrfs_trans_handle_cachep)
9091                 kmem_cache_destroy(btrfs_trans_handle_cachep);
9092         if (btrfs_transaction_cachep)
9093                 kmem_cache_destroy(btrfs_transaction_cachep);
9094         if (btrfs_path_cachep)
9095                 kmem_cache_destroy(btrfs_path_cachep);
9096         if (btrfs_free_space_cachep)
9097                 kmem_cache_destroy(btrfs_free_space_cachep);
9098         if (btrfs_delalloc_work_cachep)
9099                 kmem_cache_destroy(btrfs_delalloc_work_cachep);
9100 }
9101
9102 int btrfs_init_cachep(void)
9103 {
9104         btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
9105                         sizeof(struct btrfs_inode), 0,
9106                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
9107         if (!btrfs_inode_cachep)
9108                 goto fail;
9109
9110         btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
9111                         sizeof(struct btrfs_trans_handle), 0,
9112                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
9113         if (!btrfs_trans_handle_cachep)
9114                 goto fail;
9115
9116         btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction",
9117                         sizeof(struct btrfs_transaction), 0,
9118                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
9119         if (!btrfs_transaction_cachep)
9120                 goto fail;
9121
9122         btrfs_path_cachep = kmem_cache_create("btrfs_path",
9123                         sizeof(struct btrfs_path), 0,
9124                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
9125         if (!btrfs_path_cachep)
9126                 goto fail;
9127
9128         btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
9129                         sizeof(struct btrfs_free_space), 0,
9130                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
9131         if (!btrfs_free_space_cachep)
9132                 goto fail;
9133
9134         btrfs_delalloc_work_cachep = kmem_cache_create("btrfs_delalloc_work",
9135                         sizeof(struct btrfs_delalloc_work), 0,
9136                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
9137                         NULL);
9138         if (!btrfs_delalloc_work_cachep)
9139                 goto fail;
9140
9141         return 0;
9142 fail:
9143         btrfs_destroy_cachep();
9144         return -ENOMEM;
9145 }
9146
9147 static int btrfs_getattr(struct vfsmount *mnt,
9148                          struct dentry *dentry, struct kstat *stat)
9149 {
9150         u64 delalloc_bytes;
9151         struct inode *inode = d_inode(dentry);
9152         u32 blocksize = inode->i_sb->s_blocksize;
9153
9154         generic_fillattr(inode, stat);
9155         stat->dev = BTRFS_I(inode)->root->anon_dev;
9156         stat->blksize = PAGE_CACHE_SIZE;
9157
9158         spin_lock(&BTRFS_I(inode)->lock);
9159         delalloc_bytes = BTRFS_I(inode)->delalloc_bytes;
9160         spin_unlock(&BTRFS_I(inode)->lock);
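        /*
         * st_blocks is in 512-byte units (hence the >> 9).  Include
         * delalloc bytes not yet allocated on disk so buffered writes
         * show up in the reported block count before writeback.
         */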
9161         stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
9162                         ALIGN(delalloc_bytes, blocksize)) >> 9;
9163         return 0;
9164 }
9165
9166 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
9167                            struct inode *new_dir, struct dentry *new_dentry)
9168 {
9169         struct btrfs_trans_handle *trans;
9170         struct btrfs_root *root = BTRFS_I(old_dir)->root;
9171         struct btrfs_root *dest = BTRFS_I(new_dir)->root;
9172         struct inode *new_inode = d_inode(new_dentry);
9173         struct inode *old_inode = d_inode(old_dentry);
9174         struct timespec ctime = CURRENT_TIME;
9175         u64 index = 0;
9176         u64 root_objectid;
9177         int ret;
9178         u64 old_ino = btrfs_ino(old_inode);
9179
9180         if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
9181                 return -EPERM;
9182
9183         /* we only allow rename subvolume link between subvolumes */
9184         if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
9185                 return -EXDEV;
9186
9187         if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
9188             (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
9189                 return -ENOTEMPTY;
9190
9191         if (S_ISDIR(old_inode->i_mode) && new_inode &&
9192             new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
9193                 return -ENOTEMPTY;
9194
9195
9196         /* check for collisions, even if the name isn't there */
9197         ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
9198                              new_dentry->d_name.name,
9199                              new_dentry->d_name.len);
9200
9201         if (ret) {
9202                 if (ret == -EEXIST) {
9203                         /* we shouldn't get -EEXIST
9204                          * without a new_inode */
9205                         if (WARN_ON(!new_inode)) {
9206                                 return ret;
9207                         }
9208                 } else {
9209                         /* maybe -EOVERFLOW */
9210                         return ret;
9211                 }
9212         }
9213         ret = 0;
9214
9215         /*
9216          * We're using rename to replace one file with another.  Start IO on it
9217          * now so we don't add too much work to the end of the transaction.
9218          */
9219         if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
9220                 filemap_flush(old_inode->i_mapping);
9221
9222         /* close the racy window with snapshot create/destroy ioctl */
9223         if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9224                 down_read(&root->fs_info->subvol_sem);
9225         /*
9226          * We want to reserve the absolute worst case amount of items.  So if
9227          * both inodes are subvols and we need to unlink them then that would
9228          * require 4 item modifications, but if they are both normal inodes it
9229          * would require 5 item modifications, so we'll assume they're normal
9230          * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
9231          * should cover the worst case number of items we'll modify.
9232          */
9233         trans = btrfs_start_transaction(root, 11);
9234         if (IS_ERR(trans)) {
9235                 ret = PTR_ERR(trans);
9236                 goto out_notrans;
9237         }
9238
9239         if (dest != root)
9240                 btrfs_record_root_in_trans(trans, dest);
9241
9242         ret = btrfs_set_inode_index(new_dir, &index);
9243         if (ret)
9244                 goto out_fail;
9245
9246         BTRFS_I(old_inode)->dir_index = 0ULL;
9247         if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9248                 /* force full log commit if subvolume involved. */
9249                 btrfs_set_log_full_commit(root->fs_info, trans);
9250         } else {
9251                 ret = btrfs_insert_inode_ref(trans, dest,
9252                                              new_dentry->d_name.name,
9253                                              new_dentry->d_name.len,
9254                                              old_ino,
9255                                              btrfs_ino(new_dir), index);
9256                 if (ret)
9257                         goto out_fail;
9258                 /*
9259                  * this is an ugly little race, but the rename is required
9260                  * to make sure that if we crash, the inode is either at the
9261                  * old name or the new one.  pinning the log transaction lets
9262                  * us make sure we don't allow a log commit to come in after
9263                  * we unlink the name but before we add the new name back in.
9264                  */
9265                 btrfs_pin_log_trans(root);
9266         }
9267
9268         inode_inc_iversion(old_dir);
9269         inode_inc_iversion(new_dir);
9270         inode_inc_iversion(old_inode);
9271         old_dir->i_ctime = old_dir->i_mtime = ctime;
9272         new_dir->i_ctime = new_dir->i_mtime = ctime;
9273         old_inode->i_ctime = ctime;
9274
9275         if (old_dentry->d_parent != new_dentry->d_parent)
9276                 btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
9277
9278         if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9279                 root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
9280                 ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
9281                                         old_dentry->d_name.name,
9282                                         old_dentry->d_name.len);
9283         } else {
9284                 ret = __btrfs_unlink_inode(trans, root, old_dir,
9285                                         d_inode(old_dentry),
9286                                         old_dentry->d_name.name,
9287                                         old_dentry->d_name.len);
9288                 if (!ret)
9289                         ret = btrfs_update_inode(trans, root, old_inode);
9290         }
9291         if (ret) {
9292                 btrfs_abort_transaction(trans, root, ret);
9293                 goto out_fail;
9294         }
9295
9296         if (new_inode) {
9297                 inode_inc_iversion(new_inode);
9298                 new_inode->i_ctime = CURRENT_TIME;
9299                 if (unlikely(btrfs_ino(new_inode) ==
9300                              BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
9301                         root_objectid = BTRFS_I(new_inode)->location.objectid;
9302                         ret = btrfs_unlink_subvol(trans, dest, new_dir,
9303                                                 root_objectid,
9304                                                 new_dentry->d_name.name,
9305                                                 new_dentry->d_name.len);
9306                         BUG_ON(new_inode->i_nlink == 0);
9307                 } else {
9308                         ret = btrfs_unlink_inode(trans, dest, new_dir,
9309                                                  d_inode(new_dentry),
9310                                                  new_dentry->d_name.name,
9311                                                  new_dentry->d_name.len);
9312                 }
9313                 if (!ret && new_inode->i_nlink == 0)
9314                         ret = btrfs_orphan_add(trans, d_inode(new_dentry));
9315                 if (ret) {
9316                         btrfs_abort_transaction(trans, root, ret);
9317                         goto out_fail;
9318                 }
9319         }
9320
9321         ret = btrfs_add_link(trans, new_dir, old_inode,
9322                              new_dentry->d_name.name,
9323                              new_dentry->d_name.len, 0, index);
9324         if (ret) {
9325                 btrfs_abort_transaction(trans, root, ret);
9326                 goto out_fail;
9327         }
9328
9329         if (old_inode->i_nlink == 1)
9330                 BTRFS_I(old_inode)->dir_index = index;
9331
9332         if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
9333                 struct dentry *parent = new_dentry->d_parent;
9334                 btrfs_log_new_name(trans, old_inode, old_dir, parent);
9335                 btrfs_end_log_trans(root);
9336         }
9337 out_fail:
9338         btrfs_end_transaction(trans, root);
9339 out_notrans:
9340         if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9341                 up_read(&root->fs_info->subvol_sem);
9342
9343         return ret;
9344 }
9345
9346 static int btrfs_rename2(struct inode *old_dir, struct dentry *old_dentry,
9347                          struct inode *new_dir, struct dentry *new_dentry,
9348                          unsigned int flags)
9349 {
9350         if (flags & ~RENAME_NOREPLACE)
9351                 return -EINVAL;
9352
9353         return btrfs_rename(old_dir, old_dentry, new_dir, new_dentry);
9354 }
9355
9356 static void btrfs_run_delalloc_work(struct btrfs_work *work)
9357 {
9358         struct btrfs_delalloc_work *delalloc_work;
9359         struct inode *inode;
9360
9361         delalloc_work = container_of(work, struct btrfs_delalloc_work,
9362                                      work);
9363         inode = delalloc_work->inode;
9364         if (delalloc_work->wait) {
9365                 btrfs_wait_ordered_range(inode, 0, (u64)-1);
9366         } else {
9367                 filemap_flush(inode->i_mapping);
9368                 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
9369                              &BTRFS_I(inode)->runtime_flags))
9370                         filemap_flush(inode->i_mapping);
9371         }
9372
9373         if (delalloc_work->delay_iput)
9374                 btrfs_add_delayed_iput(inode);
9375         else
9376                 iput(inode);
9377         complete(&delalloc_work->completion);
9378 }
9379
9380 struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
9381                                                     int wait, int delay_iput)
9382 {
9383         struct btrfs_delalloc_work *work;
9384
9385         work = kmem_cache_zalloc(btrfs_delalloc_work_cachep, GFP_NOFS);
9386         if (!work)
9387                 return NULL;
9388
9389         init_completion(&work->completion);
9390         INIT_LIST_HEAD(&work->list);
9391         work->inode = inode;
9392         work->wait = wait;
9393         work->delay_iput = delay_iput;
9394         WARN_ON_ONCE(!inode);
9395         btrfs_init_work(&work->work, btrfs_flush_delalloc_helper,
9396                         btrfs_run_delalloc_work, NULL, NULL);
9397
9398         return work;
9399 }
9400
9401 void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)
9402 {
9403         wait_for_completion(&work->completion);
9404         kmem_cache_free(btrfs_delalloc_work_cachep, work);
9405 }
9406
9407 /*
9408  * Some fairly slow code that needs optimization.  This walks the list
9409  * of all the inodes with pending delalloc and forces them to disk.
9410  */
9411 static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput,
9412                                    int nr)
9413 {
9414         struct btrfs_inode *binode;
9415         struct inode *inode;
9416         struct btrfs_delalloc_work *work, *next;
9417         struct list_head works;
9418         struct list_head splice;
9419         int ret = 0;
9420
9421         INIT_LIST_HEAD(&works);
9422         INIT_LIST_HEAD(&splice);
9423
9424         mutex_lock(&root->delalloc_mutex);
9425         spin_lock(&root->delalloc_lock);
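        /*
         * Splice the delalloc list onto a private head so we work on a
         * stable snapshot; inodes are moved back to the root's list as
         * they are picked up, and anything left on the snapshot is
         * re-spliced on the way out.
         */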
9426         list_splice_init(&root->delalloc_inodes, &splice);
9427         while (!list_empty(&splice)) {
9428                 binode = list_entry(splice.next, struct btrfs_inode,
9429                                     delalloc_inodes);
9430
9431                 list_move_tail(&binode->delalloc_inodes,
9432                                &root->delalloc_inodes);
9433                 inode = igrab(&binode->vfs_inode);
9434                 if (!inode) {
9435                         cond_resched_lock(&root->delalloc_lock);
9436                         continue;
9437                 }
9438                 spin_unlock(&root->delalloc_lock);
9439
9440                 work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
9441                 if (!work) {
9442                         if (delay_iput)
9443                                 btrfs_add_delayed_iput(inode);
9444                         else
9445                                 iput(inode);
9446                         ret = -ENOMEM;
9447                         goto out;
9448                 }
9449                 list_add_tail(&work->list, &works);
9450                 btrfs_queue_work(root->fs_info->flush_workers,
9451                                  &work->work);
9452                 ret++;
9453                 if (nr != -1 && ret >= nr)
9454                         goto out;
9455                 cond_resched();
9456                 spin_lock(&root->delalloc_lock);
9457         }
9458         spin_unlock(&root->delalloc_lock);
9459
9460 out:
9461         list_for_each_entry_safe(work, next, &works, list) {
9462                 list_del_init(&work->list);
9463                 btrfs_wait_and_free_delalloc_work(work);
9464         }
9465
9466         if (!list_empty_careful(&splice)) {
9467                 spin_lock(&root->delalloc_lock);
9468                 list_splice_tail(&splice, &root->delalloc_inodes);
9469                 spin_unlock(&root->delalloc_lock);
9470         }
9471         mutex_unlock(&root->delalloc_mutex);
9472         return ret;
9473 }
9474
9475 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
9476 {
9477         int ret;
9478
9479         if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
9480                 return -EROFS;
9481
9482         ret = __start_delalloc_inodes(root, delay_iput, -1);
9483         if (ret > 0)
9484                 ret = 0;
        /*
         * filemap_flush() only queues the IO with the worker threads;
         * wait here so the IO is actually started and ordered extents
         * get created before we return.
         */
        atomic_inc(&root->fs_info->async_submit_draining);
        while (atomic_read(&root->fs_info->nr_async_submits) ||
              atomic_read(&root->fs_info->async_delalloc_pages)) {
                wait_event(root->fs_info->async_submit_wait,
                   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
                    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
        }
        atomic_dec(&root->fs_info->async_submit_draining);
        return ret;
}

int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput,
                               int nr)
{
        struct btrfs_root *root;
        struct list_head splice;
        int ret;

        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
                return -EROFS;

        INIT_LIST_HEAD(&splice);

        mutex_lock(&fs_info->delalloc_root_mutex);
        spin_lock(&fs_info->delalloc_root_lock);
        list_splice_init(&fs_info->delalloc_roots, &splice);
        while (!list_empty(&splice) && nr) {
                root = list_first_entry(&splice, struct btrfs_root,
                                        delalloc_root);
                root = btrfs_grab_fs_root(root);
                BUG_ON(!root);
                list_move_tail(&root->delalloc_root,
                               &fs_info->delalloc_roots);
                spin_unlock(&fs_info->delalloc_root_lock);

                ret = __start_delalloc_inodes(root, delay_iput, nr);
                btrfs_put_fs_root(root);
                if (ret < 0)
                        goto out;

                if (nr != -1) {
                        nr -= ret;
                        WARN_ON(nr < 0);
                }
                spin_lock(&fs_info->delalloc_root_lock);
        }
        spin_unlock(&fs_info->delalloc_root_lock);

        ret = 0;
        atomic_inc(&fs_info->async_submit_draining);
        while (atomic_read(&fs_info->nr_async_submits) ||
              atomic_read(&fs_info->async_delalloc_pages)) {
                wait_event(fs_info->async_submit_wait,
                   (atomic_read(&fs_info->nr_async_submits) == 0 &&
                    atomic_read(&fs_info->async_delalloc_pages) == 0));
        }
        atomic_dec(&fs_info->async_submit_draining);
out:
        if (!list_empty_careful(&splice)) {
                spin_lock(&fs_info->delalloc_root_lock);
                list_splice_tail(&splice, &fs_info->delalloc_roots);
                spin_unlock(&fs_info->delalloc_root_lock);
        }
        mutex_unlock(&fs_info->delalloc_root_mutex);
        return ret;
}
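
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): flush delalloc for at most nr_inodes inodes across all roots.
 * delay_iput == 0 here, so the final iput()s happen directly in the
 * flush workers; pass -1 as the limit to flush everything.
 */
static int __maybe_unused example_flush_some(struct btrfs_fs_info *fs_info,
                                             int nr_inodes)
{
        return btrfs_start_delalloc_roots(fs_info, 0, nr_inodes);
}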
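/*
 * Create a symlink: the target string is stored as an inline file
 * extent in the fs tree (hence the BTRFS_MAX_INLINE_DATA_SIZE cap on
 * its length), so no separate data blocks are ever allocated for it.
 */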
static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
                         const char *symname)
{
        struct btrfs_trans_handle *trans;
        struct btrfs_root *root = BTRFS_I(dir)->root;
        struct btrfs_path *path;
        struct btrfs_key key;
        struct inode *inode = NULL;
        int err;
        int drop_inode = 0;
        u64 objectid;
        u64 index = 0;
        int name_len;
        int datasize;
        unsigned long ptr;
        struct btrfs_file_extent_item *ei;
        struct extent_buffer *leaf;

        name_len = strlen(symname);
        if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
                return -ENAMETOOLONG;

        /*
         * 2 items for inode item and ref
         * 2 items for dir items
         * 1 item for xattr if selinux is on
         */
        trans = btrfs_start_transaction(root, 5);
        if (IS_ERR(trans))
                return PTR_ERR(trans);

        err = btrfs_find_free_ino(root, &objectid);
        if (err)
                goto out_unlock;

        inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
                                dentry->d_name.len, btrfs_ino(dir), objectid,
                                S_IFLNK|S_IRWXUGO, &index);
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
                goto out_unlock;
        }

        /*
         * If the active LSM wants to access the inode during
         * d_instantiate it needs these. Smack checks to see
         * if the filesystem supports xattrs by looking at the
         * ops vector.
         */
        inode->i_fop = &btrfs_file_operations;
        inode->i_op = &btrfs_file_inode_operations;
        inode->i_mapping->a_ops = &btrfs_aops;
        BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;

        err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
        if (err)
                goto out_unlock_inode;

        err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
        if (err)
                goto out_unlock_inode;

        path = btrfs_alloc_path();
        if (!path) {
                err = -ENOMEM;
                goto out_unlock_inode;
        }
        key.objectid = btrfs_ino(inode);
        key.offset = 0;
        key.type = BTRFS_EXTENT_DATA_KEY;
        datasize = btrfs_file_extent_calc_inline_size(name_len);
        err = btrfs_insert_empty_item(trans, root, path, &key,
                                      datasize);
        if (err) {
                btrfs_free_path(path);
                goto out_unlock_inode;
        }
        leaf = path->nodes[0];
        ei = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        btrfs_set_file_extent_generation(leaf, ei, trans->transid);
        btrfs_set_file_extent_type(leaf, ei,
                                   BTRFS_FILE_EXTENT_INLINE);
        btrfs_set_file_extent_encryption(leaf, ei, 0);
        btrfs_set_file_extent_compression(leaf, ei, 0);
        btrfs_set_file_extent_other_encoding(leaf, ei, 0);
        btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);

        ptr = btrfs_file_extent_inline_start(ei);
        write_extent_buffer(leaf, symname, ptr, name_len);
        btrfs_mark_buffer_dirty(leaf);
        btrfs_free_path(path);

        inode->i_op = &btrfs_symlink_inode_operations;
        inode->i_mapping->a_ops = &btrfs_symlink_aops;
        inode_set_bytes(inode, name_len);
        btrfs_i_size_write(inode, name_len);
        err = btrfs_update_inode(trans, root, inode);
        if (err) {
                drop_inode = 1;
                goto out_unlock_inode;
        }

        unlock_new_inode(inode);
        d_instantiate(dentry, inode);

out_unlock:
        btrfs_end_transaction(trans, root);
        if (drop_inode) {
                inode_dec_link_count(inode);
                iput(inode);
        }
        btrfs_btree_balance_dirty(root);
        return err;

out_unlock_inode:
        drop_inode = 1;
        unlock_new_inode(inode);
        goto out_unlock;
}
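/*
 * Preallocate extents covering [start, start + num_bytes). Each loop
 * iteration reserves up to 256MB at once (never less than min_size),
 * inserts a BTRFS_FILE_EXTENT_PREALLOC item and caches a matching
 * extent map. If trans is NULL the function starts and ends its own
 * transaction on every iteration; otherwise it runs inside the
 * caller's handle. i_size is only pushed forward when the caller did
 * not ask for FALLOC_FL_KEEP_SIZE.
 */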
static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
                                       u64 start, u64 num_bytes, u64 min_size,
                                       loff_t actual_len, u64 *alloc_hint,
                                       struct btrfs_trans_handle *trans)
{
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_map *em;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_key ins;
        u64 cur_offset = start;
        u64 i_size;
        u64 cur_bytes;
        int ret = 0;
        bool own_trans = true;

        if (trans)
                own_trans = false;
        while (num_bytes > 0) {
                if (own_trans) {
                        trans = btrfs_start_transaction(root, 3);
                        if (IS_ERR(trans)) {
                                ret = PTR_ERR(trans);
                                break;
                        }
                }

                cur_bytes = min(num_bytes, 256ULL * 1024 * 1024);
                cur_bytes = max(cur_bytes, min_size);
                ret = btrfs_reserve_extent(root, cur_bytes, min_size, 0,
                                           *alloc_hint, &ins, 1, 0);
                if (ret) {
                        if (own_trans)
                                btrfs_end_transaction(trans, root);
                        break;
                }

                ret = insert_reserved_file_extent(trans, inode,
                                                  cur_offset, ins.objectid,
                                                  ins.offset, ins.offset,
                                                  ins.offset, 0, 0, 0,
                                                  BTRFS_FILE_EXTENT_PREALLOC);
                if (ret) {
                        btrfs_free_reserved_extent(root, ins.objectid,
                                                   ins.offset, 0);
                        btrfs_abort_transaction(trans, root, ret);
                        if (own_trans)
                                btrfs_end_transaction(trans, root);
                        break;
                }

                btrfs_drop_extent_cache(inode, cur_offset,
                                        cur_offset + ins.offset - 1, 0);

                em = alloc_extent_map();
                if (!em) {
                        set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
                                &BTRFS_I(inode)->runtime_flags);
                        goto next;
                }

                em->start = cur_offset;
                em->orig_start = cur_offset;
                em->len = ins.offset;
                em->block_start = ins.objectid;
                em->block_len = ins.offset;
                em->orig_block_len = ins.offset;
                em->ram_bytes = ins.offset;
                em->bdev = root->fs_info->fs_devices->latest_bdev;
                set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
                em->generation = trans->transid;

                while (1) {
                        write_lock(&em_tree->lock);
                        ret = add_extent_mapping(em_tree, em, 1);
                        write_unlock(&em_tree->lock);
                        if (ret != -EEXIST)
                                break;
                        btrfs_drop_extent_cache(inode, cur_offset,
                                                cur_offset + ins.offset - 1,
                                                0);
                }
                free_extent_map(em);
next:
                num_bytes -= ins.offset;
                cur_offset += ins.offset;
                *alloc_hint = ins.objectid + ins.offset;

                inode_inc_iversion(inode);
                inode->i_ctime = CURRENT_TIME;
                BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
                if (!(mode & FALLOC_FL_KEEP_SIZE) &&
                    (actual_len > inode->i_size) &&
                    (cur_offset > inode->i_size)) {
                        if (cur_offset > actual_len)
                                i_size = actual_len;
                        else
                                i_size = cur_offset;
                        i_size_write(inode, i_size);
                        btrfs_ordered_update_i_size(inode, i_size, NULL);
                }

                ret = btrfs_update_inode(trans, root, inode);

                if (ret) {
                        btrfs_abort_transaction(trans, root, ret);
                        if (own_trans)
                                btrfs_end_transaction(trans, root);
                        break;
                }

                if (own_trans)
                        btrfs_end_transaction(trans, root);
        }
        return ret;
}

int btrfs_prealloc_file_range(struct inode *inode, int mode,
                              u64 start, u64 num_bytes, u64 min_size,
                              loff_t actual_len, u64 *alloc_hint)
{
        return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
                                           min_size, actual_len, alloc_hint,
                                           NULL);
}

int btrfs_prealloc_file_range_trans(struct inode *inode,
                                    struct btrfs_trans_handle *trans, int mode,
                                    u64 start, u64 num_bytes, u64 min_size,
                                    loff_t actual_len, u64 *alloc_hint)
{
        return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
                                           min_size, actual_len, alloc_hint,
                                           trans);
}
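
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): preallocate [start, start + len) the way an fallocate-style
 * caller might. min_size is the fs blocksize so a reservation can
 * never shrink below one block, mode 0 lets i_size move forward, and
 * the allocation hint simply starts at zero.
 */
static int __maybe_unused example_prealloc(struct inode *inode,
                                           u64 start, u64 len)
{
        u64 alloc_hint = 0;

        return btrfs_prealloc_file_range(inode, 0, start, len,
                                         BTRFS_I(inode)->root->sectorsize,
                                         start + len, &alloc_hint);
}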

static int btrfs_set_page_dirty(struct page *page)
{
        return __set_page_dirty_nobuffers(page);
}

static int btrfs_permission(struct inode *inode, int mask)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        umode_t mode = inode->i_mode;

        if (mask & MAY_WRITE &&
            (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
                if (btrfs_root_readonly(root))
                        return -EROFS;
                if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
                        return -EACCES;
        }
        return generic_permission(inode, mask);
}

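/*
 * O_TMPFILE support: create a fully set up inode with a link count of
 * zero and no directory entry, and put it on the orphan list so it is
 * cleaned up if we crash before it is either linked in or released.
 */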
static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
{
        struct btrfs_trans_handle *trans;
        struct btrfs_root *root = BTRFS_I(dir)->root;
        struct inode *inode = NULL;
        u64 objectid;
        u64 index;
        int ret = 0;

        /*
         * 5 units required for adding orphan entry
         */
        trans = btrfs_start_transaction(root, 5);
        if (IS_ERR(trans))
                return PTR_ERR(trans);

        ret = btrfs_find_free_ino(root, &objectid);
        if (ret)
                goto out;

        inode = btrfs_new_inode(trans, root, dir, NULL, 0,
                                btrfs_ino(dir), objectid, mode, &index);
        if (IS_ERR(inode)) {
                ret = PTR_ERR(inode);
                inode = NULL;
                goto out;
        }

        inode->i_fop = &btrfs_file_operations;
        inode->i_op = &btrfs_file_inode_operations;

        inode->i_mapping->a_ops = &btrfs_aops;
        BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;

        ret = btrfs_init_inode_security(trans, inode, dir, NULL);
        if (ret)
                goto out_inode;

        ret = btrfs_update_inode(trans, root, inode);
        if (ret)
                goto out_inode;
        ret = btrfs_orphan_add(trans, inode);
        if (ret)
                goto out_inode;

        /*
         * btrfs_new_inode() left the link count at 0; set it to 1 here
         * because d_tmpfile() will drop it again through
         * inode_dec_link_count() -> drop_nlink(), which warns if the
         * count is already 0.
         */
        set_nlink(inode, 1);
        unlock_new_inode(inode);
        d_tmpfile(dentry, inode);
        mark_inode_dirty(inode);

out:
        btrfs_end_transaction(trans, root);
        if (ret)
                iput(inode);
        btrfs_balance_delayed_items(root);
        btrfs_btree_balance_dirty(root);
        return ret;

out_inode:
        unlock_new_inode(inode);
        goto out;
}

/* Inspired by filemap_check_errors() */
int btrfs_inode_check_errors(struct inode *inode)
{
        int ret = 0;

        if (test_bit(AS_ENOSPC, &inode->i_mapping->flags) &&
            test_and_clear_bit(AS_ENOSPC, &inode->i_mapping->flags))
                ret = -ENOSPC;
        if (test_bit(AS_EIO, &inode->i_mapping->flags) &&
            test_and_clear_bit(AS_EIO, &inode->i_mapping->flags))
                ret = -EIO;

        return ret;
}
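
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): the usual pairing for the check above. Flush delalloc, wait
 * for the ordered extents to finish, then report any -ENOSPC/-EIO the
 * writeback left behind on the mapping.
 */
static int __maybe_unused example_flush_and_check(struct inode *inode)
{
        int ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);

        if (!ret)
                ret = btrfs_inode_check_errors(inode);
        return ret;
}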

static const struct inode_operations btrfs_dir_inode_operations = {
        .getattr        = btrfs_getattr,
        .lookup         = btrfs_lookup,
        .create         = btrfs_create,
        .unlink         = btrfs_unlink,
        .link           = btrfs_link,
        .mkdir          = btrfs_mkdir,
        .rmdir          = btrfs_rmdir,
        .rename2        = btrfs_rename2,
        .symlink        = btrfs_symlink,
        .setattr        = btrfs_setattr,
        .mknod          = btrfs_mknod,
        .setxattr       = btrfs_setxattr,
        .getxattr       = btrfs_getxattr,
        .listxattr      = btrfs_listxattr,
        .removexattr    = btrfs_removexattr,
        .permission     = btrfs_permission,
        .get_acl        = btrfs_get_acl,
        .set_acl        = btrfs_set_acl,
        .update_time    = btrfs_update_time,
        .tmpfile        = btrfs_tmpfile,
};
static const struct inode_operations btrfs_dir_ro_inode_operations = {
        .lookup         = btrfs_lookup,
        .permission     = btrfs_permission,
        .get_acl        = btrfs_get_acl,
        .set_acl        = btrfs_set_acl,
        .update_time    = btrfs_update_time,
};

static const struct file_operations btrfs_dir_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
        .iterate        = btrfs_real_readdir,
        .unlocked_ioctl = btrfs_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = btrfs_ioctl,
#endif
        .release        = btrfs_release_file,
        .fsync          = btrfs_sync_file,
};

static struct extent_io_ops btrfs_extent_io_ops = {
        .fill_delalloc = run_delalloc_range,
        .submit_bio_hook = btrfs_submit_bio_hook,
        .merge_bio_hook = btrfs_merge_bio_hook,
        .readpage_end_io_hook = btrfs_readpage_end_io_hook,
        .writepage_end_io_hook = btrfs_writepage_end_io_hook,
        .writepage_start_hook = btrfs_writepage_start_hook,
        .set_bit_hook = btrfs_set_bit_hook,
        .clear_bit_hook = btrfs_clear_bit_hook,
        .merge_extent_hook = btrfs_merge_extent_hook,
        .split_extent_hook = btrfs_split_extent_hook,
};

/*
 * btrfs doesn't support the bmap operation because swapfiles use bmap
 * to build a mapping of extents in the file, assume those extents
 * won't change over the life of the file, and use the bmap result to
 * do IO directly to the drive.
 *
 * The btrfs bmap call would return logical addresses that aren't
 * suitable for IO, and they also change frequently as COW operations
 * happen.  So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static const struct address_space_operations btrfs_aops = {
        .readpage       = btrfs_readpage,
        .writepage      = btrfs_writepage,
        .writepages     = btrfs_writepages,
        .readpages      = btrfs_readpages,
        .direct_IO      = btrfs_direct_IO,
        .invalidatepage = btrfs_invalidatepage,
        .releasepage    = btrfs_releasepage,
        .set_page_dirty = btrfs_set_page_dirty,
        .error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations btrfs_symlink_aops = {
        .readpage       = btrfs_readpage,
        .writepage      = btrfs_writepage,
        .invalidatepage = btrfs_invalidatepage,
        .releasepage    = btrfs_releasepage,
};

static const struct inode_operations btrfs_file_inode_operations = {
        .getattr        = btrfs_getattr,
        .setattr        = btrfs_setattr,
        .setxattr       = btrfs_setxattr,
        .getxattr       = btrfs_getxattr,
        .listxattr      = btrfs_listxattr,
        .removexattr    = btrfs_removexattr,
        .permission     = btrfs_permission,
        .fiemap         = btrfs_fiemap,
        .get_acl        = btrfs_get_acl,
        .set_acl        = btrfs_set_acl,
        .update_time    = btrfs_update_time,
};
static const struct inode_operations btrfs_special_inode_operations = {
        .getattr        = btrfs_getattr,
        .setattr        = btrfs_setattr,
        .permission     = btrfs_permission,
        .setxattr       = btrfs_setxattr,
        .getxattr       = btrfs_getxattr,
        .listxattr      = btrfs_listxattr,
        .removexattr    = btrfs_removexattr,
        .get_acl        = btrfs_get_acl,
        .set_acl        = btrfs_set_acl,
        .update_time    = btrfs_update_time,
};
static const struct inode_operations btrfs_symlink_inode_operations = {
        .readlink       = generic_readlink,
        .follow_link    = page_follow_link_light,
        .put_link       = page_put_link,
        .getattr        = btrfs_getattr,
        .setattr        = btrfs_setattr,
        .permission     = btrfs_permission,
        .setxattr       = btrfs_setxattr,
        .getxattr       = btrfs_getxattr,
        .listxattr      = btrfs_listxattr,
        .removexattr    = btrfs_removexattr,
        .update_time    = btrfs_update_time,
};

const struct dentry_operations btrfs_dentry_operations = {
        .d_delete       = btrfs_dentry_delete,
        .d_release      = btrfs_dentry_release,
};