btrfs: qgroup: Check if qgroup reserved space leaked
[cascardo/linux.git] fs/btrfs/inode.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18
19 #include <linux/kernel.h>
20 #include <linux/bio.h>
21 #include <linux/buffer_head.h>
22 #include <linux/file.h>
23 #include <linux/fs.h>
24 #include <linux/pagemap.h>
25 #include <linux/highmem.h>
26 #include <linux/time.h>
27 #include <linux/init.h>
28 #include <linux/string.h>
29 #include <linux/backing-dev.h>
30 #include <linux/mpage.h>
31 #include <linux/swap.h>
32 #include <linux/writeback.h>
33 #include <linux/statfs.h>
34 #include <linux/compat.h>
35 #include <linux/bit_spinlock.h>
36 #include <linux/xattr.h>
37 #include <linux/posix_acl.h>
38 #include <linux/falloc.h>
39 #include <linux/slab.h>
40 #include <linux/ratelimit.h>
41 #include <linux/mount.h>
42 #include <linux/btrfs.h>
43 #include <linux/blkdev.h>
44 #include <linux/posix_acl_xattr.h>
45 #include <linux/uio.h>
46 #include "ctree.h"
47 #include "disk-io.h"
48 #include "transaction.h"
49 #include "btrfs_inode.h"
50 #include "print-tree.h"
51 #include "ordered-data.h"
52 #include "xattr.h"
53 #include "tree-log.h"
54 #include "volumes.h"
55 #include "compression.h"
56 #include "locking.h"
57 #include "free-space-cache.h"
58 #include "inode-map.h"
59 #include "backref.h"
60 #include "hash.h"
61 #include "props.h"
62 #include "qgroup.h"
63
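/*
 * Arguments used when looking up or instantiating a btrfs inode: the key
 * that identifies the inode and the root it belongs to.
 */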
64 struct btrfs_iget_args {
65         struct btrfs_key *location;
66         struct btrfs_root *root;
67 };
68
69 static const struct inode_operations btrfs_dir_inode_operations;
70 static const struct inode_operations btrfs_symlink_inode_operations;
71 static const struct inode_operations btrfs_dir_ro_inode_operations;
72 static const struct inode_operations btrfs_special_inode_operations;
73 static const struct inode_operations btrfs_file_inode_operations;
74 static const struct address_space_operations btrfs_aops;
75 static const struct address_space_operations btrfs_symlink_aops;
76 static const struct file_operations btrfs_dir_file_operations;
77 static struct extent_io_ops btrfs_extent_io_ops;
78
79 static struct kmem_cache *btrfs_inode_cachep;
80 static struct kmem_cache *btrfs_delalloc_work_cachep;
81 struct kmem_cache *btrfs_trans_handle_cachep;
82 struct kmem_cache *btrfs_transaction_cachep;
83 struct kmem_cache *btrfs_path_cachep;
84 struct kmem_cache *btrfs_free_space_cachep;
85
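/*
 * Map the S_IF* bits of an inode's i_mode to the BTRFS_FT_* values stored
 * in directory items; shifting by S_SHIFT turns the S_IFMT mode bits into
 * a small table index.
 */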
86 #define S_SHIFT 12
87 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
88         [S_IFREG >> S_SHIFT]    = BTRFS_FT_REG_FILE,
89         [S_IFDIR >> S_SHIFT]    = BTRFS_FT_DIR,
90         [S_IFCHR >> S_SHIFT]    = BTRFS_FT_CHRDEV,
91         [S_IFBLK >> S_SHIFT]    = BTRFS_FT_BLKDEV,
92         [S_IFIFO >> S_SHIFT]    = BTRFS_FT_FIFO,
93         [S_IFSOCK >> S_SHIFT]   = BTRFS_FT_SOCK,
94         [S_IFLNK >> S_SHIFT]    = BTRFS_FT_SYMLINK,
95 };
96
97 static int btrfs_setsize(struct inode *inode, struct iattr *attr);
98 static int btrfs_truncate(struct inode *inode);
99 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
100 static noinline int cow_file_range(struct inode *inode,
101                                    struct page *locked_page,
102                                    u64 start, u64 end, int *page_started,
103                                    unsigned long *nr_written, int unlock);
104 static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
105                                            u64 len, u64 orig_start,
106                                            u64 block_start, u64 block_len,
107                                            u64 orig_block_len, u64 ram_bytes,
108                                            int type);
109
110 static int btrfs_dirty_inode(struct inode *inode);
111
112 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
113 void btrfs_test_inode_set_ops(struct inode *inode)
114 {
115         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
116 }
117 #endif
118
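/*
 * Set up the ACLs and the security xattr for a newly created inode,
 * inheriting from the parent directory where applicable.
 */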
119 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
120                                      struct inode *inode,  struct inode *dir,
121                                      const struct qstr *qstr)
122 {
123         int err;
124
125         err = btrfs_init_acl(trans, inode, dir);
126         if (!err)
127                 err = btrfs_xattr_security_init(trans, inode, dir, qstr);
128         return err;
129 }
130
131 /*
132  * this does all the hard work for inserting an inline extent into
133  * the btree.  The caller should have done a btrfs_drop_extents so that
134  * no overlapping inline items exist in the btree
135  */
136 static int insert_inline_extent(struct btrfs_trans_handle *trans,
137                                 struct btrfs_path *path, int extent_inserted,
138                                 struct btrfs_root *root, struct inode *inode,
139                                 u64 start, size_t size, size_t compressed_size,
140                                 int compress_type,
141                                 struct page **compressed_pages)
142 {
143         struct extent_buffer *leaf;
144         struct page *page = NULL;
145         char *kaddr;
146         unsigned long ptr;
147         struct btrfs_file_extent_item *ei;
148         int err = 0;
149         int ret;
150         size_t cur_size = size;
151         unsigned long offset;
152
153         if (compressed_size && compressed_pages)
154                 cur_size = compressed_size;
155
156         inode_add_bytes(inode, size);
157
158         if (!extent_inserted) {
159                 struct btrfs_key key;
160                 size_t datasize;
161
162                 key.objectid = btrfs_ino(inode);
163                 key.offset = start;
164                 key.type = BTRFS_EXTENT_DATA_KEY;
165
166                 datasize = btrfs_file_extent_calc_inline_size(cur_size);
167                 path->leave_spinning = 1;
168                 ret = btrfs_insert_empty_item(trans, root, path, &key,
169                                               datasize);
170                 if (ret) {
171                         err = ret;
172                         goto fail;
173                 }
174         }
175         leaf = path->nodes[0];
176         ei = btrfs_item_ptr(leaf, path->slots[0],
177                             struct btrfs_file_extent_item);
178         btrfs_set_file_extent_generation(leaf, ei, trans->transid);
179         btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
180         btrfs_set_file_extent_encryption(leaf, ei, 0);
181         btrfs_set_file_extent_other_encoding(leaf, ei, 0);
182         btrfs_set_file_extent_ram_bytes(leaf, ei, size);
183         ptr = btrfs_file_extent_inline_start(ei);
184
185         if (compress_type != BTRFS_COMPRESS_NONE) {
186                 struct page *cpage;
187                 int i = 0;
188                 while (compressed_size > 0) {
189                         cpage = compressed_pages[i];
190                         cur_size = min_t(unsigned long, compressed_size,
191                                        PAGE_CACHE_SIZE);
192
193                         kaddr = kmap_atomic(cpage);
194                         write_extent_buffer(leaf, kaddr, ptr, cur_size);
195                         kunmap_atomic(kaddr);
196
197                         i++;
198                         ptr += cur_size;
199                         compressed_size -= cur_size;
200                 }
201                 btrfs_set_file_extent_compression(leaf, ei,
202                                                   compress_type);
203         } else {
204                 page = find_get_page(inode->i_mapping,
205                                      start >> PAGE_CACHE_SHIFT);
206                 btrfs_set_file_extent_compression(leaf, ei, 0);
207                 kaddr = kmap_atomic(page);
208                 offset = start & (PAGE_CACHE_SIZE - 1);
209                 write_extent_buffer(leaf, kaddr + offset, ptr, size);
210                 kunmap_atomic(kaddr);
211                 page_cache_release(page);
212         }
213         btrfs_mark_buffer_dirty(leaf);
214         btrfs_release_path(path);
215
216         /*
217          * we're an inline extent, so nobody can
218          * extend the file past i_size without locking
219          * a page we already have locked.
220          *
221          * We must do any isize and inode updates
222          * before we unlock the pages.  Otherwise we
223          * could end up racing with unlink.
224          */
225         BTRFS_I(inode)->disk_i_size = inode->i_size;
226         ret = btrfs_update_inode(trans, root, inode);
227
228         return ret;
229 fail:
230         return err;
231 }
232
233
234 /*
235  * conditionally insert an inline extent into the file.  This
236  * does the checks required to make sure the data is small enough
237  * to fit as an inline extent.
238  */
239 static noinline int cow_file_range_inline(struct btrfs_root *root,
240                                           struct inode *inode, u64 start,
241                                           u64 end, size_t compressed_size,
242                                           int compress_type,
243                                           struct page **compressed_pages)
244 {
245         struct btrfs_trans_handle *trans;
246         u64 isize = i_size_read(inode);
247         u64 actual_end = min(end + 1, isize);
248         u64 inline_len = actual_end - start;
249         u64 aligned_end = ALIGN(end, root->sectorsize);
250         u64 data_len = inline_len;
251         int ret;
252         struct btrfs_path *path;
253         int extent_inserted = 0;
254         u32 extent_item_size;
255
256         if (compressed_size)
257                 data_len = compressed_size;
258
259         if (start > 0 ||
260             actual_end > PAGE_CACHE_SIZE ||
261             data_len > BTRFS_MAX_INLINE_DATA_SIZE(root) ||
262             (!compressed_size &&
263             (actual_end & (root->sectorsize - 1)) == 0) ||
264             end + 1 < isize ||
265             data_len > root->fs_info->max_inline) {
266                 return 1;
267         }
268
269         path = btrfs_alloc_path();
270         if (!path)
271                 return -ENOMEM;
272
273         trans = btrfs_join_transaction(root);
274         if (IS_ERR(trans)) {
275                 btrfs_free_path(path);
276                 return PTR_ERR(trans);
277         }
278         trans->block_rsv = &root->fs_info->delalloc_block_rsv;
279
280         if (compressed_size && compressed_pages)
281                 extent_item_size = btrfs_file_extent_calc_inline_size(
282                    compressed_size);
283         else
284                 extent_item_size = btrfs_file_extent_calc_inline_size(
285                     inline_len);
286
287         ret = __btrfs_drop_extents(trans, root, inode, path,
288                                    start, aligned_end, NULL,
289                                    1, 1, extent_item_size, &extent_inserted);
290         if (ret) {
291                 btrfs_abort_transaction(trans, root, ret);
292                 goto out;
293         }
294
295         if (isize > actual_end)
296                 inline_len = min_t(u64, isize, actual_end);
297         ret = insert_inline_extent(trans, path, extent_inserted,
298                                    root, inode, start,
299                                    inline_len, compressed_size,
300                                    compress_type, compressed_pages);
301         if (ret && ret != -ENOSPC) {
302                 btrfs_abort_transaction(trans, root, ret);
303                 goto out;
304         } else if (ret == -ENOSPC) {
305                 ret = 1;
306                 goto out;
307         }
308
309         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
310         btrfs_delalloc_release_metadata(inode, end + 1 - start);
311         btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
312 out:
313         /*
314          * Don't forget to free the reserved space; an inline extent
315          * won't be counted as a data extent, so free the space directly
316          * here.  At reserve time the space is always aligned to page
317          * size, so just free one page here.
318          */
319         btrfs_qgroup_free_data(inode, 0, PAGE_CACHE_SIZE);
320         btrfs_free_path(path);
321         btrfs_end_transaction(trans, root);
322         return ret;
323 }
324
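/*
 * One contiguous range queued by compress_file_range() for the second,
 * ordered phase of compressed writeback; if pages is NULL the range fell
 * back to uncompressed IO.
 */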
325 struct async_extent {
326         u64 start;
327         u64 ram_size;
328         u64 compressed_size;
329         struct page **pages;
330         unsigned long nr_pages;
331         int compress_type;
332         struct list_head list;
333 };
334
335 struct async_cow {
336         struct inode *inode;
337         struct btrfs_root *root;
338         struct page *locked_page;
339         u64 start;
340         u64 end;
341         struct list_head extents;
342         struct btrfs_work work;
343 };
344
345 static noinline int add_async_extent(struct async_cow *cow,
346                                      u64 start, u64 ram_size,
347                                      u64 compressed_size,
348                                      struct page **pages,
349                                      unsigned long nr_pages,
350                                      int compress_type)
351 {
352         struct async_extent *async_extent;
353
354         async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
355         BUG_ON(!async_extent); /* -ENOMEM */
356         async_extent->start = start;
357         async_extent->ram_size = ram_size;
358         async_extent->compressed_size = compressed_size;
359         async_extent->pages = pages;
360         async_extent->nr_pages = nr_pages;
361         async_extent->compress_type = compress_type;
362         list_add_tail(&async_extent->list, &cow->extents);
363         return 0;
364 }
365
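/*
 * Decide whether writes to this inode should go through the compression
 * path, based on the compress/compress-force mount options, the per-inode
 * COMPRESS/NOCOMPRESS flags and any defrag-requested compression.
 */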
366 static inline int inode_need_compress(struct inode *inode)
367 {
368         struct btrfs_root *root = BTRFS_I(inode)->root;
369
370         /* force compress */
371         if (btrfs_test_opt(root, FORCE_COMPRESS))
372                 return 1;
373         /* bad compression ratios */
374         if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
375                 return 0;
376         if (btrfs_test_opt(root, COMPRESS) ||
377             BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS ||
378             BTRFS_I(inode)->force_compress)
379                 return 1;
380         return 0;
381 }
382
383 /*
384  * we create compressed extents in two phases.  The first
385  * phase compresses a range of pages that have already been
386  * locked (both pages and state bits are locked).
387  *
388  * This is done inside an ordered work queue, and the compression
389  * is spread across many cpus.  The actual IO submission is step
390  * two, and the ordered work queue takes care of making sure that
391  * happens in the same order things were put onto the queue by
392  * writepages and friends.
393  *
394  * If this code finds it can't get good compression, it puts an
395  * entry onto the work queue to write the uncompressed bytes.  This
396  * makes sure that both compressed inodes and uncompressed inodes
397  * are written in the same order that the flusher thread sent them
398  * down.
399  */
400 static noinline void compress_file_range(struct inode *inode,
401                                         struct page *locked_page,
402                                         u64 start, u64 end,
403                                         struct async_cow *async_cow,
404                                         int *num_added)
405 {
406         struct btrfs_root *root = BTRFS_I(inode)->root;
407         u64 num_bytes;
408         u64 blocksize = root->sectorsize;
409         u64 actual_end;
410         u64 isize = i_size_read(inode);
411         int ret = 0;
412         struct page **pages = NULL;
413         unsigned long nr_pages;
414         unsigned long nr_pages_ret = 0;
415         unsigned long total_compressed = 0;
416         unsigned long total_in = 0;
417         unsigned long max_compressed = 128 * 1024;
418         unsigned long max_uncompressed = 128 * 1024;
419         int i;
420         int will_compress;
421         int compress_type = root->fs_info->compress_type;
422         int redirty = 0;
423
424         /* if this is a small write inside eof, kick off a defrag */
425         if ((end - start + 1) < 16 * 1024 &&
426             (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
427                 btrfs_add_inode_defrag(NULL, inode);
428
429         actual_end = min_t(u64, isize, end + 1);
430 again:
431         will_compress = 0;
432         nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
433         nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
434
435         /*
436          * we don't want to send crud past the end of i_size through
437          * compression, that's just a waste of CPU time.  So, if the
438          * end of the file is before the start of our current
439          * requested range of bytes, we bail out to the uncompressed
440          * cleanup code that can deal with all of this.
441          *
442          * It isn't really the fastest way to fix things, but this is a
443          * very uncommon corner.
444          */
445         if (actual_end <= start)
446                 goto cleanup_and_bail_uncompressed;
447
448         total_compressed = actual_end - start;
449
450         /*
451          * skip compression for a small file range (<= blocksize) that
452          * isn't an inline extent, since it doesn't save disk space at all.
453          */
454         if (total_compressed <= blocksize &&
455            (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
456                 goto cleanup_and_bail_uncompressed;
457
458         /* we want to make sure that the amount of ram required to uncompress
459          * an extent is reasonable, so we limit the total size in ram
460          * of a compressed extent to 128k.  This is a crucial number
461          * because it also controls how easily we can spread reads across
462          * cpus for decompression.
463          *
464          * We also want to make sure the amount of IO required to do
465          * a random read is reasonably small, so we limit the size of
466          * a compressed extent to 128k.
467          */
468         total_compressed = min(total_compressed, max_uncompressed);
469         num_bytes = ALIGN(end - start + 1, blocksize);
470         num_bytes = max(blocksize,  num_bytes);
471         total_in = 0;
472         ret = 0;
473
474         /*
475          * we do compression for mount -o compress and when the
476          * inode has not been flagged as nocompress.  This flag can
477          * change at any time if we discover bad compression ratios.
478          */
479         if (inode_need_compress(inode)) {
480                 WARN_ON(pages);
481                 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
482                 if (!pages) {
483                         /* just bail out to the uncompressed code */
484                         goto cont;
485                 }
486
487                 if (BTRFS_I(inode)->force_compress)
488                         compress_type = BTRFS_I(inode)->force_compress;
489
490                 /*
491                  * we need to call clear_page_dirty_for_io on each
492                  * page in the range.  Otherwise applications with the file
493                  * mmap'd can wander in and change the page contents while
494                  * we are compressing them.
495                  *
496                  * If the compression fails for any reason, we set the pages
497                  * dirty again later on.
498                  */
499                 extent_range_clear_dirty_for_io(inode, start, end);
500                 redirty = 1;
501                 ret = btrfs_compress_pages(compress_type,
502                                            inode->i_mapping, start,
503                                            total_compressed, pages,
504                                            nr_pages, &nr_pages_ret,
505                                            &total_in,
506                                            &total_compressed,
507                                            max_compressed);
508
509                 if (!ret) {
510                         unsigned long offset = total_compressed &
511                                 (PAGE_CACHE_SIZE - 1);
512                         struct page *page = pages[nr_pages_ret - 1];
513                         char *kaddr;
514
515                         /* zero the tail end of the last page, we might be
516                          * sending it down to disk
517                          */
518                         if (offset) {
519                                 kaddr = kmap_atomic(page);
520                                 memset(kaddr + offset, 0,
521                                        PAGE_CACHE_SIZE - offset);
522                                 kunmap_atomic(kaddr);
523                         }
524                         will_compress = 1;
525                 }
526         }
527 cont:
528         if (start == 0) {
529                 /* let's try to make an inline extent */
530                 if (ret || total_in < (actual_end - start)) {
531                         /* we didn't compress the entire range, try
532                          * to make an uncompressed inline extent.
533                          */
534                         ret = cow_file_range_inline(root, inode, start, end,
535                                                     0, 0, NULL);
536                 } else {
537                         /* try making a compressed inline extent */
538                         ret = cow_file_range_inline(root, inode, start, end,
539                                                     total_compressed,
540                                                     compress_type, pages);
541                 }
542                 if (ret <= 0) {
543                         unsigned long clear_flags = EXTENT_DELALLOC |
544                                 EXTENT_DEFRAG;
545                         unsigned long page_error_op;
546
547                         clear_flags |= (ret < 0) ? EXTENT_DO_ACCOUNTING : 0;
548                         page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;
549
550                         /*
551                          * inline extent creation worked or returned an error,
552                          * so we don't need to create any more async work items.
553                          * Unlock and free up our temp pages.
554                          */
555                         extent_clear_unlock_delalloc(inode, start, end, NULL,
556                                                      clear_flags, PAGE_UNLOCK |
557                                                      PAGE_CLEAR_DIRTY |
558                                                      PAGE_SET_WRITEBACK |
559                                                      page_error_op |
560                                                      PAGE_END_WRITEBACK);
561                         goto free_pages_out;
562                 }
563         }
564
565         if (will_compress) {
566                 /*
567                  * we aren't doing an inline extent, so round the compressed
568                  * size up to a block size boundary so the allocator does
569                  * sane things
570                  */
571                 total_compressed = ALIGN(total_compressed, blocksize);
572
573                 /*
574                  * one last check to make sure the compression is really a
575                  * win: compare the page count read with the blocks on disk
576                  */
577                 total_in = ALIGN(total_in, PAGE_CACHE_SIZE);
578                 if (total_compressed >= total_in) {
579                         will_compress = 0;
580                 } else {
581                         num_bytes = total_in;
582                 }
583         }
584         if (!will_compress && pages) {
585                 /*
586                  * the compression code ran but failed to make things smaller,
587                  * so free any pages it allocated and our page pointer array
588                  */
589                 for (i = 0; i < nr_pages_ret; i++) {
590                         WARN_ON(pages[i]->mapping);
591                         page_cache_release(pages[i]);
592                 }
593                 kfree(pages);
594                 pages = NULL;
595                 total_compressed = 0;
596                 nr_pages_ret = 0;
597
598                 /* flag the file so we don't compress in the future */
599                 if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
600                     !(BTRFS_I(inode)->force_compress)) {
601                         BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
602                 }
603         }
604         if (will_compress) {
605                 *num_added += 1;
606
607                 /* the async work queues will take care of doing actual
608                  * allocation on disk for these compressed pages,
609                  * and will submit them to the elevator.
610                  */
611                 add_async_extent(async_cow, start, num_bytes,
612                                  total_compressed, pages, nr_pages_ret,
613                                  compress_type);
614
615                 if (start + num_bytes < end) {
616                         start += num_bytes;
617                         pages = NULL;
618                         cond_resched();
619                         goto again;
620                 }
621         } else {
622 cleanup_and_bail_uncompressed:
623                 /*
624                  * No compression, but we still need to write the pages in
625                  * the file we've been given so far.  Redirty the locked
626                  * page if it corresponds to our extent and set things up
627                  * for the async work queue to run cow_file_range to do
628                  * the normal delalloc dance
629                  */
630                 if (page_offset(locked_page) >= start &&
631                     page_offset(locked_page) <= end) {
632                         __set_page_dirty_nobuffers(locked_page);
633                         /* unlocked later on in the async handlers */
634                 }
635                 if (redirty)
636                         extent_range_redirty_for_io(inode, start, end);
637                 add_async_extent(async_cow, start, end - start + 1,
638                                  0, NULL, 0, BTRFS_COMPRESS_NONE);
639                 *num_added += 1;
640         }
641
642         return;
643
644 free_pages_out:
645         for (i = 0; i < nr_pages_ret; i++) {
646                 WARN_ON(pages[i]->mapping);
647                 page_cache_release(pages[i]);
648         }
649         kfree(pages);
650 }
651
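/*
 * Drop the page references held by an async_extent and reset its page
 * array.
 */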
652 static void free_async_extent_pages(struct async_extent *async_extent)
653 {
654         int i;
655
656         if (!async_extent->pages)
657                 return;
658
659         for (i = 0; i < async_extent->nr_pages; i++) {
660                 WARN_ON(async_extent->pages[i]->mapping);
661                 page_cache_release(async_extent->pages[i]);
662         }
663         kfree(async_extent->pages);
664         async_extent->nr_pages = 0;
665         async_extent->pages = NULL;
666 }
667
668 /*
669  * phase two of compressed writeback.  This is the ordered portion
670  * of the code, which only gets called in the order the work was
671  * queued.  We walk all the async extents created by compress_file_range
672  * and send them down to the disk.
673  */
674 static noinline void submit_compressed_extents(struct inode *inode,
675                                               struct async_cow *async_cow)
676 {
677         struct async_extent *async_extent;
678         u64 alloc_hint = 0;
679         struct btrfs_key ins;
680         struct extent_map *em;
681         struct btrfs_root *root = BTRFS_I(inode)->root;
682         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
683         struct extent_io_tree *io_tree;
684         int ret = 0;
685
686 again:
687         while (!list_empty(&async_cow->extents)) {
688                 async_extent = list_entry(async_cow->extents.next,
689                                           struct async_extent, list);
690                 list_del(&async_extent->list);
691
692                 io_tree = &BTRFS_I(inode)->io_tree;
693
694 retry:
695                 /* did the compression code fall back to uncompressed IO? */
696                 if (!async_extent->pages) {
697                         int page_started = 0;
698                         unsigned long nr_written = 0;
699
700                         lock_extent(io_tree, async_extent->start,
701                                          async_extent->start +
702                                          async_extent->ram_size - 1);
703
704                         /* allocate blocks */
705                         ret = cow_file_range(inode, async_cow->locked_page,
706                                              async_extent->start,
707                                              async_extent->start +
708                                              async_extent->ram_size - 1,
709                                              &page_started, &nr_written, 0);
710
711                         /* JDM XXX */
712
713                         /*
714                          * if page_started, cow_file_range inserted an
715                          * inline extent and took care of all the unlocking
716                          * and IO for us.  Otherwise, we need to submit
717                          * all those pages down to the drive.
718                          */
719                         if (!page_started && !ret)
720                                 extent_write_locked_range(io_tree,
721                                                   inode, async_extent->start,
722                                                   async_extent->start +
723                                                   async_extent->ram_size - 1,
724                                                   btrfs_get_extent,
725                                                   WB_SYNC_ALL);
726                         else if (ret)
727                                 unlock_page(async_cow->locked_page);
728                         kfree(async_extent);
729                         cond_resched();
730                         continue;
731                 }
732
733                 lock_extent(io_tree, async_extent->start,
734                             async_extent->start + async_extent->ram_size - 1);
735
736                 ret = btrfs_reserve_extent(root,
737                                            async_extent->compressed_size,
738                                            async_extent->compressed_size,
739                                            0, alloc_hint, &ins, 1, 1);
740                 if (ret) {
741                         free_async_extent_pages(async_extent);
742
743                         if (ret == -ENOSPC) {
744                                 unlock_extent(io_tree, async_extent->start,
745                                               async_extent->start +
746                                               async_extent->ram_size - 1);
747
748                                 /*
749                                  * we need to redirty the pages if we decide to
750                                  * fall back to uncompressed IO; otherwise we
751                                  * will not submit these pages down to lower
752                                  * layers.
753                                  */
754                                 extent_range_redirty_for_io(inode,
755                                                 async_extent->start,
756                                                 async_extent->start +
757                                                 async_extent->ram_size - 1);
758
759                                 goto retry;
760                         }
761                         goto out_free;
762                 }
763                 /*
764                  * here we're doing allocation and writeback of the
765                  * compressed pages
766                  */
767                 btrfs_drop_extent_cache(inode, async_extent->start,
768                                         async_extent->start +
769                                         async_extent->ram_size - 1, 0);
770
771                 em = alloc_extent_map();
772                 if (!em) {
773                         ret = -ENOMEM;
774                         goto out_free_reserve;
775                 }
776                 em->start = async_extent->start;
777                 em->len = async_extent->ram_size;
778                 em->orig_start = em->start;
779                 em->mod_start = em->start;
780                 em->mod_len = em->len;
781
782                 em->block_start = ins.objectid;
783                 em->block_len = ins.offset;
784                 em->orig_block_len = ins.offset;
785                 em->ram_bytes = async_extent->ram_size;
786                 em->bdev = root->fs_info->fs_devices->latest_bdev;
787                 em->compress_type = async_extent->compress_type;
788                 set_bit(EXTENT_FLAG_PINNED, &em->flags);
789                 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
790                 em->generation = -1;
791
792                 while (1) {
793                         write_lock(&em_tree->lock);
794                         ret = add_extent_mapping(em_tree, em, 1);
795                         write_unlock(&em_tree->lock);
796                         if (ret != -EEXIST) {
797                                 free_extent_map(em);
798                                 break;
799                         }
800                         btrfs_drop_extent_cache(inode, async_extent->start,
801                                                 async_extent->start +
802                                                 async_extent->ram_size - 1, 0);
803                 }
804
805                 if (ret)
806                         goto out_free_reserve;
807
808                 ret = btrfs_add_ordered_extent_compress(inode,
809                                                 async_extent->start,
810                                                 ins.objectid,
811                                                 async_extent->ram_size,
812                                                 ins.offset,
813                                                 BTRFS_ORDERED_COMPRESSED,
814                                                 async_extent->compress_type);
815                 if (ret) {
816                         btrfs_drop_extent_cache(inode, async_extent->start,
817                                                 async_extent->start +
818                                                 async_extent->ram_size - 1, 0);
819                         goto out_free_reserve;
820                 }
821
822                 /*
823                  * clear dirty, set writeback and unlock the pages.
824                  */
825                 extent_clear_unlock_delalloc(inode, async_extent->start,
826                                 async_extent->start +
827                                 async_extent->ram_size - 1,
828                                 NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
829                                 PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
830                                 PAGE_SET_WRITEBACK);
831                 ret = btrfs_submit_compressed_write(inode,
832                                     async_extent->start,
833                                     async_extent->ram_size,
834                                     ins.objectid,
835                                     ins.offset, async_extent->pages,
836                                     async_extent->nr_pages);
837                 if (ret) {
838                         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
839                         struct page *p = async_extent->pages[0];
840                         const u64 start = async_extent->start;
841                         const u64 end = start + async_extent->ram_size - 1;
842
843                         p->mapping = inode->i_mapping;
844                         tree->ops->writepage_end_io_hook(p, start, end,
845                                                          NULL, 0);
846                         p->mapping = NULL;
847                         extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
848                                                      PAGE_END_WRITEBACK |
849                                                      PAGE_SET_ERROR);
850                         free_async_extent_pages(async_extent);
851                 }
852                 alloc_hint = ins.objectid + ins.offset;
853                 kfree(async_extent);
854                 cond_resched();
855         }
856         return;
857 out_free_reserve:
858         btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
859 out_free:
860         extent_clear_unlock_delalloc(inode, async_extent->start,
861                                      async_extent->start +
862                                      async_extent->ram_size - 1,
863                                      NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
864                                      EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
865                                      PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
866                                      PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
867                                      PAGE_SET_ERROR);
868         free_async_extent_pages(async_extent);
869         kfree(async_extent);
870         goto again;
871 }
872
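/*
 * Look for an existing extent mapping near this range and return its disk
 * block number as an allocation hint, so new extents land close to data
 * the inode already has.  Returns 0 if no useful hint is found.
 */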
873 static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
874                                       u64 num_bytes)
875 {
876         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
877         struct extent_map *em;
878         u64 alloc_hint = 0;
879
880         read_lock(&em_tree->lock);
881         em = search_extent_mapping(em_tree, start, num_bytes);
882         if (em) {
883                 /*
884                  * if block start isn't an actual block number then find the
885                  * first block in this inode and use that as a hint.  If that
886                  * block is also bogus then just don't worry about it.
887                  */
888                 if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
889                         free_extent_map(em);
890                         em = search_extent_mapping(em_tree, 0, 0);
891                         if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
892                                 alloc_hint = em->block_start;
893                         if (em)
894                                 free_extent_map(em);
895                 } else {
896                         alloc_hint = em->block_start;
897                         free_extent_map(em);
898                 }
899         }
900         read_unlock(&em_tree->lock);
901
902         return alloc_hint;
903 }
904
905 /*
906  * when extent_io.c finds a delayed allocation range in the file,
907  * the callbacks end up in this code.  The basic idea is to
908  * allocate extents on disk for the range, and create ordered data structs
909  * in ram to track those extents.
910  *
911  * locked_page is the page that writepage had locked already.  We use
912  * it to make sure we don't do extra locks or unlocks.
913  *
914  * *page_started is set to one if we unlock locked_page and do everything
915  * required to start IO on it.  It may be clean and already done with
916  * IO when we return.
917  */
918 static noinline int cow_file_range(struct inode *inode,
919                                    struct page *locked_page,
920                                    u64 start, u64 end, int *page_started,
921                                    unsigned long *nr_written,
922                                    int unlock)
923 {
924         struct btrfs_root *root = BTRFS_I(inode)->root;
925         u64 alloc_hint = 0;
926         u64 num_bytes;
927         unsigned long ram_size;
928         u64 disk_num_bytes;
929         u64 cur_alloc_size;
930         u64 blocksize = root->sectorsize;
931         struct btrfs_key ins;
932         struct extent_map *em;
933         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
934         int ret = 0;
935
936         if (btrfs_is_free_space_inode(inode)) {
937                 WARN_ON_ONCE(1);
938                 ret = -EINVAL;
939                 goto out_unlock;
940         }
941
942         num_bytes = ALIGN(end - start + 1, blocksize);
943         num_bytes = max(blocksize,  num_bytes);
944         disk_num_bytes = num_bytes;
945
946         /* if this is a small write inside eof, kick off defrag */
947         if (num_bytes < 64 * 1024 &&
948             (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
949                 btrfs_add_inode_defrag(NULL, inode);
950
951         if (start == 0) {
952                 /* let's try to make an inline extent */
953                 ret = cow_file_range_inline(root, inode, start, end, 0, 0,
954                                             NULL);
955                 if (ret == 0) {
956                         extent_clear_unlock_delalloc(inode, start, end, NULL,
957                                      EXTENT_LOCKED | EXTENT_DELALLOC |
958                                      EXTENT_DEFRAG, PAGE_UNLOCK |
959                                      PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
960                                      PAGE_END_WRITEBACK);
961
962                         *nr_written = *nr_written +
963                              (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
964                         *page_started = 1;
965                         goto out;
966                 } else if (ret < 0) {
967                         goto out_unlock;
968                 }
969         }
970
971         BUG_ON(disk_num_bytes >
972                btrfs_super_total_bytes(root->fs_info->super_copy));
973
974         alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
975         btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
976
977         while (disk_num_bytes > 0) {
978                 unsigned long op;
979
980                 cur_alloc_size = disk_num_bytes;
981                 ret = btrfs_reserve_extent(root, cur_alloc_size,
982                                            root->sectorsize, 0, alloc_hint,
983                                            &ins, 1, 1);
984                 if (ret < 0)
985                         goto out_unlock;
986
987                 em = alloc_extent_map();
988                 if (!em) {
989                         ret = -ENOMEM;
990                         goto out_reserve;
991                 }
992                 em->start = start;
993                 em->orig_start = em->start;
994                 ram_size = ins.offset;
995                 em->len = ins.offset;
996                 em->mod_start = em->start;
997                 em->mod_len = em->len;
998
999                 em->block_start = ins.objectid;
1000                 em->block_len = ins.offset;
1001                 em->orig_block_len = ins.offset;
1002                 em->ram_bytes = ram_size;
1003                 em->bdev = root->fs_info->fs_devices->latest_bdev;
1004                 set_bit(EXTENT_FLAG_PINNED, &em->flags);
1005                 em->generation = -1;
1006
1007                 while (1) {
1008                         write_lock(&em_tree->lock);
1009                         ret = add_extent_mapping(em_tree, em, 1);
1010                         write_unlock(&em_tree->lock);
1011                         if (ret != -EEXIST) {
1012                                 free_extent_map(em);
1013                                 break;
1014                         }
1015                         btrfs_drop_extent_cache(inode, start,
1016                                                 start + ram_size - 1, 0);
1017                 }
1018                 if (ret)
1019                         goto out_reserve;
1020
1021                 cur_alloc_size = ins.offset;
1022                 ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
1023                                                ram_size, cur_alloc_size, 0);
1024                 if (ret)
1025                         goto out_drop_extent_cache;
1026
1027                 if (root->root_key.objectid ==
1028                     BTRFS_DATA_RELOC_TREE_OBJECTID) {
1029                         ret = btrfs_reloc_clone_csums(inode, start,
1030                                                       cur_alloc_size);
1031                         if (ret)
1032                                 goto out_drop_extent_cache;
1033                 }
1034
1035                 if (disk_num_bytes < cur_alloc_size)
1036                         break;
1037
1038                 /* we're not doing compressed IO; don't unlock the first
1039                  * page (which the caller expects to stay locked), don't
1040                  * clear any dirty bits and don't set any writeback bits
1041                  *
1042                  * Do set the Private2 bit so we know this page was properly
1043                  * set up for writepage
1044                  */
1045                 op = unlock ? PAGE_UNLOCK : 0;
1046                 op |= PAGE_SET_PRIVATE2;
1047
1048                 extent_clear_unlock_delalloc(inode, start,
1049                                              start + ram_size - 1, locked_page,
1050                                              EXTENT_LOCKED | EXTENT_DELALLOC,
1051                                              op);
1052                 disk_num_bytes -= cur_alloc_size;
1053                 num_bytes -= cur_alloc_size;
1054                 alloc_hint = ins.objectid + ins.offset;
1055                 start += cur_alloc_size;
1056         }
1057 out:
1058         return ret;
1059
1060 out_drop_extent_cache:
1061         btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0);
1062 out_reserve:
1063         btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
1064 out_unlock:
1065         extent_clear_unlock_delalloc(inode, start, end, locked_page,
1066                                      EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
1067                                      EXTENT_DELALLOC | EXTENT_DEFRAG,
1068                                      PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
1069                                      PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK);
1070         goto out;
1071 }
1072
1073 /*
1074  * work queue callback to start compression on a file and pages
1075  */
1076 static noinline void async_cow_start(struct btrfs_work *work)
1077 {
1078         struct async_cow *async_cow;
1079         int num_added = 0;
1080         async_cow = container_of(work, struct async_cow, work);
1081
1082         compress_file_range(async_cow->inode, async_cow->locked_page,
1083                             async_cow->start, async_cow->end, async_cow,
1084                             &num_added);
1085         if (num_added == 0) {
1086                 btrfs_add_delayed_iput(async_cow->inode);
1087                 async_cow->inode = NULL;
1088         }
1089 }
1090
1091 /*
1092  * work queue callback to submit previously compressed pages
1093  */
1094 static noinline void async_cow_submit(struct btrfs_work *work)
1095 {
1096         struct async_cow *async_cow;
1097         struct btrfs_root *root;
1098         unsigned long nr_pages;
1099
1100         async_cow = container_of(work, struct async_cow, work);
1101
1102         root = async_cow->root;
1103         nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
1104                 PAGE_CACHE_SHIFT;
1105
1106         /*
1107          * atomic_sub_return implies a barrier for waitqueue_active
1108          */
1109         if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
1110             5 * 1024 * 1024 &&
1111             waitqueue_active(&root->fs_info->async_submit_wait))
1112                 wake_up(&root->fs_info->async_submit_wait);
1113
1114         if (async_cow->inode)
1115                 submit_compressed_extents(async_cow->inode, async_cow);
1116 }
1117
1118 static noinline void async_cow_free(struct btrfs_work *work)
1119 {
1120         struct async_cow *async_cow;
1121         async_cow = container_of(work, struct async_cow, work);
1122         if (async_cow->inode)
1123                 btrfs_add_delayed_iput(async_cow->inode);
1124         kfree(async_cow);
1125 }
1126
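/*
 * Split the delalloc range into chunks (512k at a time when compressing),
 * queue each one as an async_cow work item (compression happens in
 * async_cow_start, submission in async_cow_submit) and throttle the caller
 * while too many async delalloc pages are in flight.
 */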
1127 static int cow_file_range_async(struct inode *inode, struct page *locked_page,
1128                                 u64 start, u64 end, int *page_started,
1129                                 unsigned long *nr_written)
1130 {
1131         struct async_cow *async_cow;
1132         struct btrfs_root *root = BTRFS_I(inode)->root;
1133         unsigned long nr_pages;
1134         u64 cur_end;
1135         int limit = 10 * 1024 * 1024;
1136
1137         clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
1138                          1, 0, NULL, GFP_NOFS);
1139         while (start < end) {
1140                 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
1141                 BUG_ON(!async_cow); /* -ENOMEM */
1142                 async_cow->inode = igrab(inode);
1143                 async_cow->root = root;
1144                 async_cow->locked_page = locked_page;
1145                 async_cow->start = start;
1146
1147                 if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
1148                     !btrfs_test_opt(root, FORCE_COMPRESS))
1149                         cur_end = end;
1150                 else
1151                         cur_end = min(end, start + 512 * 1024 - 1);
1152
1153                 async_cow->end = cur_end;
1154                 INIT_LIST_HEAD(&async_cow->extents);
1155
1156                 btrfs_init_work(&async_cow->work,
1157                                 btrfs_delalloc_helper,
1158                                 async_cow_start, async_cow_submit,
1159                                 async_cow_free);
1160
1161                 nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
1162                         PAGE_CACHE_SHIFT;
1163                 atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
1164
1165                 btrfs_queue_work(root->fs_info->delalloc_workers,
1166                                  &async_cow->work);
1167
1168                 if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
1169                         wait_event(root->fs_info->async_submit_wait,
1170                            (atomic_read(&root->fs_info->async_delalloc_pages) <
1171                             limit));
1172                 }
1173
1174                 while (atomic_read(&root->fs_info->async_submit_draining) &&
1175                       atomic_read(&root->fs_info->async_delalloc_pages)) {
1176                         wait_event(root->fs_info->async_submit_wait,
1177                           (atomic_read(&root->fs_info->async_delalloc_pages) ==
1178                            0));
1179                 }
1180
1181                 *nr_written += nr_pages;
1182                 start = cur_end + 1;
1183         }
1184         *page_started = 1;
1185         return 0;
1186 }
1187
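/*
 * Return 0 if no checksums exist for the byte range [bytenr, bytenr +
 * num_bytes), 1 otherwise; any csum items found during the lookup are
 * freed before returning.
 */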
1188 static noinline int csum_exist_in_range(struct btrfs_root *root,
1189                                         u64 bytenr, u64 num_bytes)
1190 {
1191         int ret;
1192         struct btrfs_ordered_sum *sums;
1193         LIST_HEAD(list);
1194
1195         ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
1196                                        bytenr + num_bytes - 1, &list, 0);
1197         if (ret == 0 && list_empty(&list))
1198                 return 0;
1199
1200         while (!list_empty(&list)) {
1201                 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
1202                 list_del(&sums->list);
1203                 kfree(sums);
1204         }
1205         return 1;
1206 }
1207
1208 /*
1209  * nocow writeback callback.  This checks for snapshots or COW copies
1210  * of the extents that exist in the file, and COWs the file as required.
1211  *
1212  * If no cow copies or snapshots exist, we write directly to the existing
1213  * blocks on disk
1214  */
1215 static noinline int run_delalloc_nocow(struct inode *inode,
1216                                        struct page *locked_page,
1217                               u64 start, u64 end, int *page_started, int force,
1218                               unsigned long *nr_written)
1219 {
1220         struct btrfs_root *root = BTRFS_I(inode)->root;
1221         struct btrfs_trans_handle *trans;
1222         struct extent_buffer *leaf;
1223         struct btrfs_path *path;
1224         struct btrfs_file_extent_item *fi;
1225         struct btrfs_key found_key;
1226         u64 cow_start;
1227         u64 cur_offset;
1228         u64 extent_end;
1229         u64 extent_offset;
1230         u64 disk_bytenr;
1231         u64 num_bytes;
1232         u64 disk_num_bytes;
1233         u64 ram_bytes;
1234         int extent_type;
1235         int ret, err;
1236         int type;
1237         int nocow;
1238         int check_prev = 1;
1239         bool nolock;
1240         u64 ino = btrfs_ino(inode);
1241
1242         path = btrfs_alloc_path();
1243         if (!path) {
1244                 extent_clear_unlock_delalloc(inode, start, end, locked_page,
1245                                              EXTENT_LOCKED | EXTENT_DELALLOC |
1246                                              EXTENT_DO_ACCOUNTING |
1247                                              EXTENT_DEFRAG, PAGE_UNLOCK |
1248                                              PAGE_CLEAR_DIRTY |
1249                                              PAGE_SET_WRITEBACK |
1250                                              PAGE_END_WRITEBACK);
1251                 return -ENOMEM;
1252         }
1253
1254         nolock = btrfs_is_free_space_inode(inode);
1255
1256         if (nolock)
1257                 trans = btrfs_join_transaction_nolock(root);
1258         else
1259                 trans = btrfs_join_transaction(root);
1260
1261         if (IS_ERR(trans)) {
1262                 extent_clear_unlock_delalloc(inode, start, end, locked_page,
1263                                              EXTENT_LOCKED | EXTENT_DELALLOC |
1264                                              EXTENT_DO_ACCOUNTING |
1265                                              EXTENT_DEFRAG, PAGE_UNLOCK |
1266                                              PAGE_CLEAR_DIRTY |
1267                                              PAGE_SET_WRITEBACK |
1268                                              PAGE_END_WRITEBACK);
1269                 btrfs_free_path(path);
1270                 return PTR_ERR(trans);
1271         }
1272
1273         trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1274
1275         cow_start = (u64)-1;
1276         cur_offset = start;
1277         while (1) {
1278                 ret = btrfs_lookup_file_extent(trans, root, path, ino,
1279                                                cur_offset, 0);
1280                 if (ret < 0)
1281                         goto error;
1282                 if (ret > 0 && path->slots[0] > 0 && check_prev) {
1283                         leaf = path->nodes[0];
1284                         btrfs_item_key_to_cpu(leaf, &found_key,
1285                                               path->slots[0] - 1);
1286                         if (found_key.objectid == ino &&
1287                             found_key.type == BTRFS_EXTENT_DATA_KEY)
1288                                 path->slots[0]--;
1289                 }
1290                 check_prev = 0;
1291 next_slot:
1292                 leaf = path->nodes[0];
1293                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1294                         ret = btrfs_next_leaf(root, path);
1295                         if (ret < 0)
1296                                 goto error;
1297                         if (ret > 0)
1298                                 break;
1299                         leaf = path->nodes[0];
1300                 }
1301
1302                 nocow = 0;
1303                 disk_bytenr = 0;
1304                 num_bytes = 0;
1305                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1306
1307                 if (found_key.objectid > ino ||
1308                     found_key.type > BTRFS_EXTENT_DATA_KEY ||
1309                     found_key.offset > end)
1310                         break;
1311
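                     /*
                      * A gap (hole) before the next extent item has no backing
                      * extent and must be COW'd; record where it ends and let
                      * the !nocow path below pick it up.
                      */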
1312                 if (found_key.offset > cur_offset) {
1313                         extent_end = found_key.offset;
1314                         extent_type = 0;
1315                         goto out_check;
1316                 }
1317
1318                 fi = btrfs_item_ptr(leaf, path->slots[0],
1319                                     struct btrfs_file_extent_item);
1320                 extent_type = btrfs_file_extent_type(leaf, fi);
1321
1322                 ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
1323                 if (extent_type == BTRFS_FILE_EXTENT_REG ||
1324                     extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1325                         disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1326                         extent_offset = btrfs_file_extent_offset(leaf, fi);
1327                         extent_end = found_key.offset +
1328                                 btrfs_file_extent_num_bytes(leaf, fi);
1329                         disk_num_bytes =
1330                                 btrfs_file_extent_disk_num_bytes(leaf, fi);
1331                         if (extent_end <= start) {
1332                                 path->slots[0]++;
1333                                 goto next_slot;
1334                         }
1335                         if (disk_bytenr == 0)
1336                                 goto out_check;
1337                         if (btrfs_file_extent_compression(leaf, fi) ||
1338                             btrfs_file_extent_encryption(leaf, fi) ||
1339                             btrfs_file_extent_other_encoding(leaf, fi))
1340                                 goto out_check;
1341                         if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
1342                                 goto out_check;
1343                         if (btrfs_extent_readonly(root, disk_bytenr))
1344                                 goto out_check;
1345                         if (btrfs_cross_ref_exist(trans, root, ino,
1346                                                   found_key.offset -
1347                                                   extent_offset, disk_bytenr))
1348                                 goto out_check;
1349                         disk_bytenr += extent_offset;
1350                         disk_bytenr += cur_offset - found_key.offset;
1351                         num_bytes = min(end + 1, extent_end) - cur_offset;
1352                         /*
1353                          * if there are pending snapshots for this root,
1354                          * we fall back to the common COW path.
1355                          */
1356                         if (!nolock) {
1357                                 err = btrfs_start_write_no_snapshoting(root);
1358                                 if (!err)
1359                                         goto out_check;
1360                         }
1361                         /*
1362                          * Force COW if a csum exists in the range.
1363                          * This ensures that the csums for a given extent are
1364                          * either valid or do not exist.
1365                          */
1366                         if (csum_exist_in_range(root, disk_bytenr, num_bytes))
1367                                 goto out_check;
1368                         nocow = 1;
1369                 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
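                             /*
                              * Inline extents are never written nocow; just
                              * note where they end so the COW path below
                              * covers them.
                              */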
1370                         extent_end = found_key.offset +
1371                                 btrfs_file_extent_inline_len(leaf,
1372                                                      path->slots[0], fi);
1373                         extent_end = ALIGN(extent_end, root->sectorsize);
1374                 } else {
1375                         BUG_ON(1);
1376                 }
1377 out_check:
1378                 if (extent_end <= start) {
1379                         path->slots[0]++;
1380                         if (!nolock && nocow)
1381                                 btrfs_end_write_no_snapshoting(root);
1382                         goto next_slot;
1383                 }
1384                 if (!nocow) {
1385                         if (cow_start == (u64)-1)
1386                                 cow_start = cur_offset;
1387                         cur_offset = extent_end;
1388                         if (cur_offset > end)
1389                                 break;
1390                         path->slots[0]++;
1391                         goto next_slot;
1392                 }
1393
1394                 btrfs_release_path(path);
1395                 if (cow_start != (u64)-1) {
1396                         ret = cow_file_range(inode, locked_page,
1397                                              cow_start, found_key.offset - 1,
1398                                              page_started, nr_written, 1);
1399                         if (ret) {
1400                                 if (!nolock && nocow)
1401                                         btrfs_end_write_no_snapshoting(root);
1402                                 goto error;
1403                         }
1404                         cow_start = (u64)-1;
1405                 }
1406
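                     /*
                      * Writing into preallocated space: insert a pinned extent
                      * map so lookups of this range resolve to the right disk
                      * location while the ordered extent is outstanding.  On
                      * -EEXIST, drop the overlapping cached extents and retry.
                      */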
1407                 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1408                         struct extent_map *em;
1409                         struct extent_map_tree *em_tree;
1410                         em_tree = &BTRFS_I(inode)->extent_tree;
1411                         em = alloc_extent_map();
1412                         BUG_ON(!em); /* -ENOMEM */
1413                         em->start = cur_offset;
1414                         em->orig_start = found_key.offset - extent_offset;
1415                         em->len = num_bytes;
1416                         em->block_len = num_bytes;
1417                         em->block_start = disk_bytenr;
1418                         em->orig_block_len = disk_num_bytes;
1419                         em->ram_bytes = ram_bytes;
1420                         em->bdev = root->fs_info->fs_devices->latest_bdev;
1421                         em->mod_start = em->start;
1422                         em->mod_len = em->len;
1423                         set_bit(EXTENT_FLAG_PINNED, &em->flags);
1424                         set_bit(EXTENT_FLAG_FILLING, &em->flags);
1425                         em->generation = -1;
1426                         while (1) {
1427                                 write_lock(&em_tree->lock);
1428                                 ret = add_extent_mapping(em_tree, em, 1);
1429                                 write_unlock(&em_tree->lock);
1430                                 if (ret != -EEXIST) {
1431                                         free_extent_map(em);
1432                                         break;
1433                                 }
1434                                 btrfs_drop_extent_cache(inode, em->start,
1435                                                 em->start + em->len - 1, 0);
1436                         }
1437                         type = BTRFS_ORDERED_PREALLOC;
1438                 } else {
1439                         type = BTRFS_ORDERED_NOCOW;
1440                 }
1441
1442                 ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1443                                                num_bytes, num_bytes, type);
1444                 BUG_ON(ret); /* -ENOMEM */
1445
1446                 if (root->root_key.objectid ==
1447                     BTRFS_DATA_RELOC_TREE_OBJECTID) {
1448                         ret = btrfs_reloc_clone_csums(inode, cur_offset,
1449                                                       num_bytes);
1450                         if (ret) {
1451                                 if (!nolock && nocow)
1452                                         btrfs_end_write_no_snapshoting(root);
1453                                 goto error;
1454                         }
1455                 }
1456
1457                 extent_clear_unlock_delalloc(inode, cur_offset,
1458                                              cur_offset + num_bytes - 1,
1459                                              locked_page, EXTENT_LOCKED |
1460                                              EXTENT_DELALLOC, PAGE_UNLOCK |
1461                                              PAGE_SET_PRIVATE2);
1462                 if (!nolock && nocow)
1463                         btrfs_end_write_no_snapshoting(root);
1464                 cur_offset = extent_end;
1465                 if (cur_offset > end)
1466                         break;
1467         }
1468         btrfs_release_path(path);
1469
1470         if (cur_offset <= end && cow_start == (u64)-1) {
1471                 cow_start = cur_offset;
1472                 cur_offset = end;
1473         }
1474
1475         if (cow_start != (u64)-1) {
1476                 ret = cow_file_range(inode, locked_page, cow_start, end,
1477                                      page_started, nr_written, 1);
1478                 if (ret)
1479                         goto error;
1480         }
1481
1482 error:
1483         err = btrfs_end_transaction(trans, root);
1484         if (!ret)
1485                 ret = err;
1486
1487         if (ret && cur_offset < end)
1488                 extent_clear_unlock_delalloc(inode, cur_offset, end,
1489                                              locked_page, EXTENT_LOCKED |
1490                                              EXTENT_DELALLOC | EXTENT_DEFRAG |
1491                                              EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
1492                                              PAGE_CLEAR_DIRTY |
1493                                              PAGE_SET_WRITEBACK |
1494                                              PAGE_END_WRITEBACK);
1495         btrfs_free_path(path);
1496         return ret;
1497 }
1498
1499 static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
1500 {
1501
1502         if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
1503             !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC))
1504                 return 0;
1505
1506         /*
1507          * @defrag_bytes is only a hint; no spinlock is held here.
1508          * If it is non-zero, the file is being defragged.
1509          * Force COW if the given range needs to be defragged.
1510          */
1511         if (BTRFS_I(inode)->defrag_bytes &&
1512             test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
1513                            EXTENT_DEFRAG, 0, NULL))
1514                 return 1;
1515
1516         return 0;
1517 }
1518
1519 /*
1520  * extent_io.c call back to do delayed allocation processing
1521  */
1522 static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1523                               u64 start, u64 end, int *page_started,
1524                               unsigned long *nr_written)
1525 {
1526         int ret;
1527         int force_cow = need_force_cow(inode, start, end);
1528
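             /*
              * NODATACOW inodes try to reuse any existing extent (force = 1),
              * PREALLOC inodes only skip COW inside preallocated extents
              * (force = 0).  A pending defrag on the range (need_force_cow())
              * disables both nocow paths.  Everything else is plain COW, or
              * async COW when the data should be compressed.
              */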
1529         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) {
1530                 ret = run_delalloc_nocow(inode, locked_page, start, end,
1531                                          page_started, 1, nr_written);
1532         } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
1533                 ret = run_delalloc_nocow(inode, locked_page, start, end,
1534                                          page_started, 0, nr_written);
1535         } else if (!inode_need_compress(inode)) {
1536                 ret = cow_file_range(inode, locked_page, start, end,
1537                                       page_started, nr_written, 1);
1538         } else {
1539                 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1540                         &BTRFS_I(inode)->runtime_flags);
1541                 ret = cow_file_range_async(inode, locked_page, start, end,
1542                                            page_started, nr_written);
1543         }
1544         return ret;
1545 }
1546
1547 static void btrfs_split_extent_hook(struct inode *inode,
1548                                     struct extent_state *orig, u64 split)
1549 {
1550         u64 size;
1551
1552         /* not delalloc, ignore it */
1553         if (!(orig->state & EXTENT_DELALLOC))
1554                 return;
1555
1556         size = orig->end - orig->start + 1;
1557         if (size > BTRFS_MAX_EXTENT_SIZE) {
1558                 u64 num_extents;
1559                 u64 new_size;
1560
1561                 /*
1562                  * See the explanation in btrfs_merge_extent_hook, the same
1563                  * applies here, just in reverse.
1564                  */
1565                 new_size = orig->end - split + 1;
1566                 num_extents = div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
1567                                         BTRFS_MAX_EXTENT_SIZE);
1568                 new_size = split - orig->start;
1569                 num_extents += div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
1570                                         BTRFS_MAX_EXTENT_SIZE);
1571                 if (div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1,
1572                               BTRFS_MAX_EXTENT_SIZE) >= num_extents)
1573                         return;
1574         }
1575
1576         spin_lock(&BTRFS_I(inode)->lock);
1577         BTRFS_I(inode)->outstanding_extents++;
1578         spin_unlock(&BTRFS_I(inode)->lock);
1579 }
1580
1581 /*
1582  * extent_io.c merge_extent_hook, used to track merged delayed allocation
1583  * extents so we can keep track of new extents that are just merged onto old
1584  * extents, such as when we are doing sequential writes, so we can properly
1585  * account for the metadata space we'll need.
1586  */
1587 static void btrfs_merge_extent_hook(struct inode *inode,
1588                                     struct extent_state *new,
1589                                     struct extent_state *other)
1590 {
1591         u64 new_size, old_size;
1592         u64 num_extents;
1593
1594         /* not delalloc, ignore it */
1595         if (!(other->state & EXTENT_DELALLOC))
1596                 return;
1597
1598         if (new->start > other->start)
1599                 new_size = new->end - other->start + 1;
1600         else
1601                 new_size = other->end - new->start + 1;
1602
1603         /* we're not bigger than the max, unreserve the space and go */
1604         if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
1605                 spin_lock(&BTRFS_I(inode)->lock);
1606                 BTRFS_I(inode)->outstanding_extents--;
1607                 spin_unlock(&BTRFS_I(inode)->lock);
1608                 return;
1609         }
1610
1611         /*
1612          * We have to add up either side to figure out how many extents were
1613          * accounted for before we merged into one big extent.  If the number of
1614          * extents we accounted for is <= the amount we need for the new range
1615          * then we can return, otherwise drop.  Think of it like this
1616          *
1617          * [ 4k][MAX_SIZE]
1618          *
1619          * So we've grown the extent by a MAX_SIZE extent, this would mean we
1620          * need 2 outstanding extents, on one side we have 1 and the other side
1621          * we have 1 so they are == and we can return.  But in this case
1622          *
1623          * [MAX_SIZE+4k][MAX_SIZE+4k]
1624          *
1625          * Each range on their own accounts for 2 extents, but merged together
1626          * they are only 3 extents worth of accounting, so we need to drop in
1627          * this case.
1628          */
1629         old_size = other->end - other->start + 1;
1630         num_extents = div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1,
1631                                 BTRFS_MAX_EXTENT_SIZE);
1632         old_size = new->end - new->start + 1;
1633         num_extents += div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1,
1634                                  BTRFS_MAX_EXTENT_SIZE);
1635
1636         if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
1637                       BTRFS_MAX_EXTENT_SIZE) >= num_extents)
1638                 return;
1639
1640         spin_lock(&BTRFS_I(inode)->lock);
1641         BTRFS_I(inode)->outstanding_extents--;
1642         spin_unlock(&BTRFS_I(inode)->lock);
1643 }
1644
1645 static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
1646                                       struct inode *inode)
1647 {
1648         spin_lock(&root->delalloc_lock);
1649         if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1650                 list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1651                               &root->delalloc_inodes);
1652                 set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1653                         &BTRFS_I(inode)->runtime_flags);
1654                 root->nr_delalloc_inodes++;
1655                 if (root->nr_delalloc_inodes == 1) {
1656                         spin_lock(&root->fs_info->delalloc_root_lock);
1657                         BUG_ON(!list_empty(&root->delalloc_root));
1658                         list_add_tail(&root->delalloc_root,
1659                                       &root->fs_info->delalloc_roots);
1660                         spin_unlock(&root->fs_info->delalloc_root_lock);
1661                 }
1662         }
1663         spin_unlock(&root->delalloc_lock);
1664 }
1665
1666 static void btrfs_del_delalloc_inode(struct btrfs_root *root,
1667                                      struct inode *inode)
1668 {
1669         spin_lock(&root->delalloc_lock);
1670         if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1671                 list_del_init(&BTRFS_I(inode)->delalloc_inodes);
1672                 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1673                           &BTRFS_I(inode)->runtime_flags);
1674                 root->nr_delalloc_inodes--;
1675                 if (!root->nr_delalloc_inodes) {
1676                         spin_lock(&root->fs_info->delalloc_root_lock);
1677                         BUG_ON(list_empty(&root->delalloc_root));
1678                         list_del_init(&root->delalloc_root);
1679                         spin_unlock(&root->fs_info->delalloc_root_lock);
1680                 }
1681         }
1682         spin_unlock(&root->delalloc_lock);
1683 }
1684
1685 /*
1686  * extent_io.c set_bit_hook, used to track delayed allocation
1687  * bytes in this file, and to maintain the list of inodes that
1688  * have pending delalloc work to be done.
1689  */
1690 static void btrfs_set_bit_hook(struct inode *inode,
1691                                struct extent_state *state, unsigned *bits)
1692 {
1693
1694         if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
1695                 WARN_ON(1);
1696         /*
1697          * set_bit and clear bit hooks normally require _irqsave/restore
1698          * but in this case, we are only testing for the DELALLOC
1699          * bit, which is only set or cleared with irqs on
1700          */
1701         if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1702                 struct btrfs_root *root = BTRFS_I(inode)->root;
1703                 u64 len = state->end + 1 - state->start;
1704                 bool do_list = !btrfs_is_free_space_inode(inode);
1705
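                     /*
                      * EXTENT_FIRST_DELALLOC marks the first extent_state
                      * touched by a delalloc reservation; that outstanding
                      * extent was already accounted by the reservation path,
                      * so only bump the count for additional states.
                      */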
1706                 if (*bits & EXTENT_FIRST_DELALLOC) {
1707                         *bits &= ~EXTENT_FIRST_DELALLOC;
1708                 } else {
1709                         spin_lock(&BTRFS_I(inode)->lock);
1710                         BTRFS_I(inode)->outstanding_extents++;
1711                         spin_unlock(&BTRFS_I(inode)->lock);
1712                 }
1713
1714                 /* For sanity tests */
1715                 if (btrfs_test_is_dummy_root(root))
1716                         return;
1717
1718                 __percpu_counter_add(&root->fs_info->delalloc_bytes, len,
1719                                      root->fs_info->delalloc_batch);
1720                 spin_lock(&BTRFS_I(inode)->lock);
1721                 BTRFS_I(inode)->delalloc_bytes += len;
1722                 if (*bits & EXTENT_DEFRAG)
1723                         BTRFS_I(inode)->defrag_bytes += len;
1724                 if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1725                                          &BTRFS_I(inode)->runtime_flags))
1726                         btrfs_add_delalloc_inodes(root, inode);
1727                 spin_unlock(&BTRFS_I(inode)->lock);
1728         }
1729 }
1730
1731 /*
1732  * extent_io.c clear_bit_hook, see set_bit_hook for why
1733  */
1734 static void btrfs_clear_bit_hook(struct inode *inode,
1735                                  struct extent_state *state,
1736                                  unsigned *bits)
1737 {
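             /*
              * Delalloc accounting reserves one outstanding extent per
              * BTRFS_MAX_EXTENT_SIZE of the range, rounded up; num_extents is
              * how many this state covered.
              */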
1738         u64 len = state->end + 1 - state->start;
1739         u64 num_extents = div64_u64(len + BTRFS_MAX_EXTENT_SIZE -1,
1740                                     BTRFS_MAX_EXTENT_SIZE);
1741
1742         spin_lock(&BTRFS_I(inode)->lock);
1743         if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG))
1744                 BTRFS_I(inode)->defrag_bytes -= len;
1745         spin_unlock(&BTRFS_I(inode)->lock);
1746
1747         /*
1748          * set_bit and clear bit hooks normally require _irqsave/restore
1749          * but in this case, we are only testing for the DELALLOC
1750          * bit, which is only set or cleared with irqs on
1751          */
1752         if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1753                 struct btrfs_root *root = BTRFS_I(inode)->root;
1754                 bool do_list = !btrfs_is_free_space_inode(inode);
1755
1756                 if (*bits & EXTENT_FIRST_DELALLOC) {
1757                         *bits &= ~EXTENT_FIRST_DELALLOC;
1758                 } else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
1759                         spin_lock(&BTRFS_I(inode)->lock);
1760                         BTRFS_I(inode)->outstanding_extents -= num_extents;
1761                         spin_unlock(&BTRFS_I(inode)->lock);
1762                 }
1763
1764                 /*
1765                  * We don't reserve metadata space for space cache inodes so we
1766                  * don't need to call btrfs_delalloc_release_metadata if there is an
1767                  * error.
1768                  */
1769                 if (*bits & EXTENT_DO_ACCOUNTING &&
1770                     root != root->fs_info->tree_root)
1771                         btrfs_delalloc_release_metadata(inode, len);
1772
1773                 /* For sanity tests. */
1774                 if (btrfs_test_is_dummy_root(root))
1775                         return;
1776
1777                 if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
1778                     && do_list && !(state->state & EXTENT_NORESERVE))
1779                         btrfs_free_reserved_data_space_noquota(inode,
1780                                         state->start, len);
1781
1782                 __percpu_counter_add(&root->fs_info->delalloc_bytes, -len,
1783                                      root->fs_info->delalloc_batch);
1784                 spin_lock(&BTRFS_I(inode)->lock);
1785                 BTRFS_I(inode)->delalloc_bytes -= len;
1786                 if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
1787                     test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1788                              &BTRFS_I(inode)->runtime_flags))
1789                         btrfs_del_delalloc_inode(root, inode);
1790                 spin_unlock(&BTRFS_I(inode)->lock);
1791         }
1792 }
1793
1794 /*
1795  * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
1796  * we don't create bios that span stripes or chunks
1797  */
1798 int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
1799                          size_t size, struct bio *bio,
1800                          unsigned long bio_flags)
1801 {
1802         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1803         u64 logical = (u64)bio->bi_iter.bi_sector << 9;
1804         u64 length = 0;
1805         u64 map_length;
1806         int ret;
1807
1808         if (bio_flags & EXTENT_BIO_COMPRESSED)
1809                 return 0;
1810
1811         length = bio->bi_iter.bi_size;
1812         map_length = length;
1813         ret = btrfs_map_block(root->fs_info, rw, logical,
1814                               &map_length, NULL, 0);
1815         /* Will always return 0 when no bbio is requested (NULL passed above) */
1816         BUG_ON(ret < 0);
1817         if (map_length < length + size)
1818                 return 1;
1819         return 0;
1820 }
1821
1822 /*
1823  * in order to insert checksums into the metadata in large chunks,
1824  * we wait until bio submission time.   All the pages in the bio are
1825  * checksummed and sums are attached onto the ordered extent record.
1826  *
1827  * At IO completion time the csums attached to the ordered extent record
1828  * are inserted into the btree
1829  */
1830 static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1831                                     struct bio *bio, int mirror_num,
1832                                     unsigned long bio_flags,
1833                                     u64 bio_offset)
1834 {
1835         struct btrfs_root *root = BTRFS_I(inode)->root;
1836         int ret = 0;
1837
1838         ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1839         BUG_ON(ret); /* -ENOMEM */
1840         return 0;
1841 }
1842
1843 /*
1844  * in order to insert checksums into the metadata in large chunks,
1845  * we wait until bio submission time.   All the pages in the bio are
1846  * checksummed and sums are attached onto the ordered extent record.
1847  *
1848  * At IO completion time the csums attached to the ordered extent record
1849  * are inserted into the btree
1850  */
1851 static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1852                           int mirror_num, unsigned long bio_flags,
1853                           u64 bio_offset)
1854 {
1855         struct btrfs_root *root = BTRFS_I(inode)->root;
1856         int ret;
1857
1858         ret = btrfs_map_bio(root, rw, bio, mirror_num, 1);
1859         if (ret) {
1860                 bio->bi_error = ret;
1861                 bio_endio(bio);
1862         }
1863         return ret;
1864 }
1865
1866 /*
1867  * extent_io.c submission hook. This does the right thing for csum calculation
1868  * on write, or reading the csums from the tree before a read
1869  */
1870 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1871                           int mirror_num, unsigned long bio_flags,
1872                           u64 bio_offset)
1873 {
1874         struct btrfs_root *root = BTRFS_I(inode)->root;
1875         enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
1876         int ret = 0;
1877         int skip_sum;
1878         int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
1879
1880         skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1881
1882         if (btrfs_is_free_space_inode(inode))
1883                 metadata = BTRFS_WQ_ENDIO_FREE_SPACE;
1884
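             /*
              * Reads: route bio completion through a workqueue, then either
              * hand compressed bios to the compressed read path or look up the
              * csums to verify against.  Writes: unless checksums are skipped,
              * checksum asynchronously via a worker when possible, otherwise
              * inline, then map the bio to the device.
              */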
1885         if (!(rw & REQ_WRITE)) {
1886                 ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
1887                 if (ret)
1888                         goto out;
1889
1890                 if (bio_flags & EXTENT_BIO_COMPRESSED) {
1891                         ret = btrfs_submit_compressed_read(inode, bio,
1892                                                            mirror_num,
1893                                                            bio_flags);
1894                         goto out;
1895                 } else if (!skip_sum) {
1896                         ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
1897                         if (ret)
1898                                 goto out;
1899                 }
1900                 goto mapit;
1901         } else if (async && !skip_sum) {
1902                 /* csum items have already been cloned */
1903                 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1904                         goto mapit;
1905                 /* we're doing a write, do the async checksumming */
1906                 ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
1907                                    inode, rw, bio, mirror_num,
1908                                    bio_flags, bio_offset,
1909                                    __btrfs_submit_bio_start,
1910                                    __btrfs_submit_bio_done);
1911                 goto out;
1912         } else if (!skip_sum) {
1913                 ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1914                 if (ret)
1915                         goto out;
1916         }
1917
1918 mapit:
1919         ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
1920
1921 out:
1922         if (ret < 0) {
1923                 bio->bi_error = ret;
1924                 bio_endio(bio);
1925         }
1926         return ret;
1927 }
1928
1929 /*
1930  * given a list of ordered sums, record them in the inode.  This happens
1931  * at IO completion time based on sums calculated at bio submission time.
1932  */
1933 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1934                              struct inode *inode, u64 file_offset,
1935                              struct list_head *list)
1936 {
1937         struct btrfs_ordered_sum *sum;
1938
1939         list_for_each_entry(sum, list, list) {
1940                 trans->adding_csums = 1;
1941                 btrfs_csum_file_blocks(trans,
1942                        BTRFS_I(inode)->root->fs_info->csum_root, sum);
1943                 trans->adding_csums = 0;
1944         }
1945         return 0;
1946 }
1947
1948 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
1949                               struct extent_state **cached_state)
1950 {
1951         WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0);
1952         return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1953                                    cached_state, GFP_NOFS);
1954 }
1955
1956 /* see btrfs_writepage_start_hook for details on why this is required */
1957 struct btrfs_writepage_fixup {
1958         struct page *page;
1959         struct btrfs_work work;
1960 };
1961
1962 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1963 {
1964         struct btrfs_writepage_fixup *fixup;
1965         struct btrfs_ordered_extent *ordered;
1966         struct extent_state *cached_state = NULL;
1967         struct page *page;
1968         struct inode *inode;
1969         u64 page_start;
1970         u64 page_end;
1971         int ret;
1972
1973         fixup = container_of(work, struct btrfs_writepage_fixup, work);
1974         page = fixup->page;
1975 again:
1976         lock_page(page);
1977         if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
1978                 ClearPageChecked(page);
1979                 goto out_page;
1980         }
1981
1982         inode = page->mapping->host;
1983         page_start = page_offset(page);
1984         page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1985
1986         lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
1987                          &cached_state);
1988
1989         /* already ordered? We're done */
1990         if (PagePrivate2(page))
1991                 goto out;
1992
1993         ordered = btrfs_lookup_ordered_extent(inode, page_start);
1994         if (ordered) {
1995                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
1996                                      page_end, &cached_state, GFP_NOFS);
1997                 unlock_page(page);
1998                 btrfs_start_ordered_extent(inode, ordered, 1);
1999                 btrfs_put_ordered_extent(ordered);
2000                 goto again;
2001         }
2002
2003         ret = btrfs_delalloc_reserve_space(inode, page_start,
2004                                            PAGE_CACHE_SIZE);
2005         if (ret) {
2006                 mapping_set_error(page->mapping, ret);
2007                 end_extent_writepage(page, ret, page_start, page_end);
2008                 ClearPageChecked(page);
2009                 goto out;
2010         }
2011
2012         btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
2013         ClearPageChecked(page);
2014         set_page_dirty(page);
2015 out:
2016         unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
2017                              &cached_state, GFP_NOFS);
2018 out_page:
2019         unlock_page(page);
2020         page_cache_release(page);
2021         kfree(fixup);
2022 }
2023
2024 /*
2025  * There are a few paths in the higher layers of the kernel that directly
2026  * set the page dirty bit without asking the filesystem if it is a
2027  * good idea.  This causes problems because we want to make sure COW
2028  * properly happens and the data=ordered rules are followed.
2029  *
2030  * In our case any range that doesn't have the ORDERED bit set
2031  * hasn't been properly setup for IO.  We kick off an async process
2032  * to fix it up.  The async helper will wait for ordered extents, set
2033  * the delalloc bit and make it safe to write the page.
2034  */
2035 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
2036 {
2037         struct inode *inode = page->mapping->host;
2038         struct btrfs_writepage_fixup *fixup;
2039         struct btrfs_root *root = BTRFS_I(inode)->root;
2040
2041         /* this page is properly in the ordered list */
2042         if (TestClearPagePrivate2(page))
2043                 return 0;
2044
2045         if (PageChecked(page))
2046                 return -EAGAIN;
2047
2048         fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
2049         if (!fixup)
2050                 return -EAGAIN;
2051
2052         SetPageChecked(page);
2053         page_cache_get(page);
2054         btrfs_init_work(&fixup->work, btrfs_fixup_helper,
2055                         btrfs_writepage_fixup_worker, NULL, NULL);
2056         fixup->page = page;
2057         btrfs_queue_work(root->fs_info->fixup_workers, &fixup->work);
2058         return -EBUSY;
2059 }
2060
2061 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
2062                                        struct inode *inode, u64 file_pos,
2063                                        u64 disk_bytenr, u64 disk_num_bytes,
2064                                        u64 num_bytes, u64 ram_bytes,
2065                                        u8 compression, u8 encryption,
2066                                        u16 other_encoding, int extent_type)
2067 {
2068         struct btrfs_root *root = BTRFS_I(inode)->root;
2069         struct btrfs_file_extent_item *fi;
2070         struct btrfs_path *path;
2071         struct extent_buffer *leaf;
2072         struct btrfs_key ins;
2073         int extent_inserted = 0;
2074         int ret;
2075
2076         path = btrfs_alloc_path();
2077         if (!path)
2078                 return -ENOMEM;
2079
2080         /*
2081          * we may be replacing one extent in the tree with another.
2082          * The new extent is pinned in the extent map, and we don't want
2083          * to drop it from the cache until it is completely in the btree.
2084          *
2085          * So, tell btrfs_drop_extents to leave this extent in the cache.
2086          * The caller is expected to unpin it and allow it to be merged
2087          * with the others.
2088          */
2089         ret = __btrfs_drop_extents(trans, root, inode, path, file_pos,
2090                                    file_pos + num_bytes, NULL, 0,
2091                                    1, sizeof(*fi), &extent_inserted);
2092         if (ret)
2093                 goto out;
2094
2095         if (!extent_inserted) {
2096                 ins.objectid = btrfs_ino(inode);
2097                 ins.offset = file_pos;
2098                 ins.type = BTRFS_EXTENT_DATA_KEY;
2099
2100                 path->leave_spinning = 1;
2101                 ret = btrfs_insert_empty_item(trans, root, path, &ins,
2102                                               sizeof(*fi));
2103                 if (ret)
2104                         goto out;
2105         }
2106         leaf = path->nodes[0];
2107         fi = btrfs_item_ptr(leaf, path->slots[0],
2108                             struct btrfs_file_extent_item);
2109         btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2110         btrfs_set_file_extent_type(leaf, fi, extent_type);
2111         btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
2112         btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
2113         btrfs_set_file_extent_offset(leaf, fi, 0);
2114         btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2115         btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
2116         btrfs_set_file_extent_compression(leaf, fi, compression);
2117         btrfs_set_file_extent_encryption(leaf, fi, encryption);
2118         btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
2119
2120         btrfs_mark_buffer_dirty(leaf);
2121         btrfs_release_path(path);
2122
2123         inode_add_bytes(inode, num_bytes);
2124
2125         ins.objectid = disk_bytenr;
2126         ins.offset = disk_num_bytes;
2127         ins.type = BTRFS_EXTENT_ITEM_KEY;
2128         ret = btrfs_alloc_reserved_file_extent(trans, root,
2129                                         root->root_key.objectid,
2130                                         btrfs_ino(inode), file_pos, &ins);
2131         if (ret < 0)
2132                 goto out;
2133         /*
2134          * Release the reserved range from the inode dirty range map and
2135          * hand it over to the delayed ref code, as accounting now only
2136          * happens at commit_transaction() time.
2137          */
2138         btrfs_qgroup_release_data(inode, file_pos, ram_bytes);
2139         ret = btrfs_add_delayed_qgroup_reserve(root->fs_info, trans,
2140                         root->objectid, disk_bytenr, ram_bytes);
2141 out:
2142         btrfs_free_path(path);
2143
2144         return ret;
2145 }
2146
2147 /* snapshot-aware defrag */
2148 struct sa_defrag_extent_backref {
2149         struct rb_node node;
2150         struct old_sa_defrag_extent *old;
2151         u64 root_id;
2152         u64 inum;
2153         u64 file_pos;
2154         u64 extent_offset;
2155         u64 num_bytes;
2156         u64 generation;
2157 };
2158
2159 struct old_sa_defrag_extent {
2160         struct list_head list;
2161         struct new_sa_defrag_extent *new;
2162
2163         u64 extent_offset;
2164         u64 bytenr;
2165         u64 offset;
2166         u64 len;
2167         int count;
2168 };
2169
2170 struct new_sa_defrag_extent {
2171         struct rb_root root;
2172         struct list_head head;
2173         struct btrfs_path *path;
2174         struct inode *inode;
2175         u64 file_pos;
2176         u64 len;
2177         u64 bytenr;
2178         u64 disk_len;
2179         u8 compress_type;
2180 };
2181
2182 static int backref_comp(struct sa_defrag_extent_backref *b1,
2183                         struct sa_defrag_extent_backref *b2)
2184 {
2185         if (b1->root_id < b2->root_id)
2186                 return -1;
2187         else if (b1->root_id > b2->root_id)
2188                 return 1;
2189
2190         if (b1->inum < b2->inum)
2191                 return -1;
2192         else if (b1->inum > b2->inum)
2193                 return 1;
2194
2195         if (b1->file_pos < b2->file_pos)
2196                 return -1;
2197         else if (b1->file_pos > b2->file_pos)
2198                 return 1;
2199
2200         /*
2201          * [------------------------------] ===> (a range of space)
2202          *     |<--->|   |<---->| =============> (fs/file tree A)
2203          * |<---------------------------->| ===> (fs/file tree B)
2204          *
2205          * A range of space can refer to two file extents in one tree while
2206          * referring to only one file extent in another tree.
2207          *
2208          * So we may process a disk offset more than once (two extents in A)
2209          * and land on the same extent (one extent in B), then insert two
2210          * identical backrefs (both referring to the extent in B).
2211          */
2212         return 0;
2213 }
2214
2215 static void backref_insert(struct rb_root *root,
2216                            struct sa_defrag_extent_backref *backref)
2217 {
2218         struct rb_node **p = &root->rb_node;
2219         struct rb_node *parent = NULL;
2220         struct sa_defrag_extent_backref *entry;
2221         int ret;
2222
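             /*
              * Plain rb-tree insert.  Equal keys (backref_comp() == 0) go to
              * the right, so the duplicate backrefs described in backref_comp()
              * are kept rather than rejected.
              */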
2223         while (*p) {
2224                 parent = *p;
2225                 entry = rb_entry(parent, struct sa_defrag_extent_backref, node);
2226
2227                 ret = backref_comp(backref, entry);
2228                 if (ret < 0)
2229                         p = &(*p)->rb_left;
2230                 else
2231                         p = &(*p)->rb_right;
2232         }
2233
2234         rb_link_node(&backref->node, parent, p);
2235         rb_insert_color(&backref->node, root);
2236 }
2237
2238 /*
2239  * Note the backref might have changed; in that case we just return 0.
2240  */
2241 static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
2242                                        void *ctx)
2243 {
2244         struct btrfs_file_extent_item *extent;
2245         struct btrfs_fs_info *fs_info;
2246         struct old_sa_defrag_extent *old = ctx;
2247         struct new_sa_defrag_extent *new = old->new;
2248         struct btrfs_path *path = new->path;
2249         struct btrfs_key key;
2250         struct btrfs_root *root;
2251         struct sa_defrag_extent_backref *backref;
2252         struct extent_buffer *leaf;
2253         struct inode *inode = new->inode;
2254         int slot;
2255         int ret;
2256         u64 extent_offset;
2257         u64 num_bytes;
2258
2259         if (BTRFS_I(inode)->root->root_key.objectid == root_id &&
2260             inum == btrfs_ino(inode))
2261                 return 0;
2262
2263         key.objectid = root_id;
2264         key.type = BTRFS_ROOT_ITEM_KEY;
2265         key.offset = (u64)-1;
2266
2267         fs_info = BTRFS_I(inode)->root->fs_info;
2268         root = btrfs_read_fs_root_no_name(fs_info, &key);
2269         if (IS_ERR(root)) {
2270                 if (PTR_ERR(root) == -ENOENT)
2271                         return 0;
2272                 WARN_ON(1);
2273                 pr_debug("inum=%llu, offset=%llu, root_id=%llu\n",
2274                          inum, offset, root_id);
2275                 return PTR_ERR(root);
2276         }
2277
2278         key.objectid = inum;
2279         key.type = BTRFS_EXTENT_DATA_KEY;
2280         if (offset > (u64)-1 << 32)
2281                 key.offset = 0;
2282         else
2283                 key.offset = offset;
2284
2285         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2286         if (WARN_ON(ret < 0))
2287                 return ret;
2288         ret = 0;
2289
2290         while (1) {
2291                 cond_resched();
2292
2293                 leaf = path->nodes[0];
2294                 slot = path->slots[0];
2295
2296                 if (slot >= btrfs_header_nritems(leaf)) {
2297                         ret = btrfs_next_leaf(root, path);
2298                         if (ret < 0) {
2299                                 goto out;
2300                         } else if (ret > 0) {
2301                                 ret = 0;
2302                                 goto out;
2303                         }
2304                         continue;
2305                 }
2306
2307                 path->slots[0]++;
2308
2309                 btrfs_item_key_to_cpu(leaf, &key, slot);
2310
2311                 if (key.objectid > inum)
2312                         goto out;
2313
2314                 if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY)
2315                         continue;
2316
2317                 extent = btrfs_item_ptr(leaf, slot,
2318                                         struct btrfs_file_extent_item);
2319
2320                 if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
2321                         continue;
2322
2323                 /*
2324                  * 'offset' refers to the exact key.offset,
2325                  * NOT the 'offset' field in btrfs_extent_data_ref, ie.
2326                  * (key.offset - extent_offset).
2327                  */
2328                 if (key.offset != offset)
2329                         continue;
2330
2331                 extent_offset = btrfs_file_extent_offset(leaf, extent);
2332                 num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
2333
2334                 if (extent_offset >= old->extent_offset + old->offset +
2335                     old->len || extent_offset + num_bytes <=
2336                     old->extent_offset + old->offset)
2337                         continue;
2338                 break;
2339         }
2340
2341         backref = kmalloc(sizeof(*backref), GFP_NOFS);
2342         if (!backref) {
2343                 ret = -ENOMEM;
2344                 goto out;
2345         }
2346
2347         backref->root_id = root_id;
2348         backref->inum = inum;
2349         backref->file_pos = offset;
2350         backref->num_bytes = num_bytes;
2351         backref->extent_offset = extent_offset;
2352         backref->generation = btrfs_file_extent_generation(leaf, extent);
2353         backref->old = old;
2354         backref_insert(&new->root, backref);
2355         old->count++;
2356 out:
2357         btrfs_release_path(path);
2358         WARN_ON(ret);
2359         return ret;
2360 }
2361
2362 static noinline bool record_extent_backrefs(struct btrfs_path *path,
2363                                    struct new_sa_defrag_extent *new)
2364 {
2365         struct btrfs_fs_info *fs_info = BTRFS_I(new->inode)->root->fs_info;
2366         struct old_sa_defrag_extent *old, *tmp;
2367         int ret;
2368
2369         new->path = path;
2370
2371         list_for_each_entry_safe(old, tmp, &new->head, list) {
2372                 ret = iterate_inodes_from_logical(old->bytenr +
2373                                                   old->extent_offset, fs_info,
2374                                                   path, record_one_backref,
2375                                                   old);
2376                 if (ret < 0 && ret != -ENOENT)
2377                         return false;
2378
2379                 /* no backref to be processed for this extent */
2380                 if (!old->count) {
2381                         list_del(&old->list);
2382                         kfree(old);
2383                 }
2384         }
2385
2386         if (list_empty(&new->head))
2387                 return false;
2388
2389         return true;
2390 }
2391
2392 static int relink_is_mergable(struct extent_buffer *leaf,
2393                               struct btrfs_file_extent_item *fi,
2394                               struct new_sa_defrag_extent *new)
2395 {
2396         if (btrfs_file_extent_disk_bytenr(leaf, fi) != new->bytenr)
2397                 return 0;
2398
2399         if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2400                 return 0;
2401
2402         if (btrfs_file_extent_compression(leaf, fi) != new->compress_type)
2403                 return 0;
2404
2405         if (btrfs_file_extent_encryption(leaf, fi) ||
2406             btrfs_file_extent_other_encoding(leaf, fi))
2407                 return 0;
2408
2409         return 1;
2410 }
2411
2412 /*
2413  * Note the backref might have changed; in that case we just return 0.
2414  */
2415 static noinline int relink_extent_backref(struct btrfs_path *path,
2416                                  struct sa_defrag_extent_backref *prev,
2417                                  struct sa_defrag_extent_backref *backref)
2418 {
2419         struct btrfs_file_extent_item *extent;
2420         struct btrfs_file_extent_item *item;
2421         struct btrfs_ordered_extent *ordered;
2422         struct btrfs_trans_handle *trans;
2423         struct btrfs_fs_info *fs_info;
2424         struct btrfs_root *root;
2425         struct btrfs_key key;
2426         struct extent_buffer *leaf;
2427         struct old_sa_defrag_extent *old = backref->old;
2428         struct new_sa_defrag_extent *new = old->new;
2429         struct inode *src_inode = new->inode;
2430         struct inode *inode;
2431         struct extent_state *cached = NULL;
2432         int ret = 0;
2433         u64 start;
2434         u64 len;
2435         u64 lock_start;
2436         u64 lock_end;
2437         bool merge = false;
2438         int index;
2439
2440         if (prev && prev->root_id == backref->root_id &&
2441             prev->inum == backref->inum &&
2442             prev->file_pos + prev->num_bytes == backref->file_pos)
2443                 merge = true;
2444
2445         /* step 1: get root */
2446         key.objectid = backref->root_id;
2447         key.type = BTRFS_ROOT_ITEM_KEY;
2448         key.offset = (u64)-1;
2449
2450         fs_info = BTRFS_I(src_inode)->root->fs_info;
2451         index = srcu_read_lock(&fs_info->subvol_srcu);
2452
2453         root = btrfs_read_fs_root_no_name(fs_info, &key);
2454         if (IS_ERR(root)) {
2455                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2456                 if (PTR_ERR(root) == -ENOENT)
2457                         return 0;
2458                 return PTR_ERR(root);
2459         }
2460
2461         if (btrfs_root_readonly(root)) {
2462                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2463                 return 0;
2464         }
2465
2466         /* step 2: get inode */
2467         key.objectid = backref->inum;
2468         key.type = BTRFS_INODE_ITEM_KEY;
2469         key.offset = 0;
2470
2471         inode = btrfs_iget(fs_info->sb, &key, root, NULL);
2472         if (IS_ERR(inode)) {
2473                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2474                 return 0;
2475         }
2476
2477         srcu_read_unlock(&fs_info->subvol_srcu, index);
2478
2479         /* step 3: relink backref */
2480         lock_start = backref->file_pos;
2481         lock_end = backref->file_pos + backref->num_bytes - 1;
2482         lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2483                          0, &cached);
2484
2485         ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
2486         if (ordered) {
2487                 btrfs_put_ordered_extent(ordered);
2488                 goto out_unlock;
2489         }
2490
2491         trans = btrfs_join_transaction(root);
2492         if (IS_ERR(trans)) {
2493                 ret = PTR_ERR(trans);
2494                 goto out_unlock;
2495         }
2496
2497         key.objectid = backref->inum;
2498         key.type = BTRFS_EXTENT_DATA_KEY;
2499         key.offset = backref->file_pos;
2500
2501         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2502         if (ret < 0) {
2503                 goto out_free_path;
2504         } else if (ret > 0) {
2505                 ret = 0;
2506                 goto out_free_path;
2507         }
2508
2509         extent = btrfs_item_ptr(path->nodes[0], path->slots[0],
2510                                 struct btrfs_file_extent_item);
2511
2512         if (btrfs_file_extent_generation(path->nodes[0], extent) !=
2513             backref->generation)
2514                 goto out_free_path;
2515
2516         btrfs_release_path(path);
2517
2518         start = backref->file_pos;
2519         if (backref->extent_offset < old->extent_offset + old->offset)
2520                 start += old->extent_offset + old->offset -
2521                          backref->extent_offset;
2522
2523         len = min(backref->extent_offset + backref->num_bytes,
2524                   old->extent_offset + old->offset + old->len);
2525         len -= max(backref->extent_offset, old->extent_offset + old->offset);
2526
2527         ret = btrfs_drop_extents(trans, root, inode, start,
2528                                  start + len, 1);
2529         if (ret)
2530                 goto out_free_path;
2531 again:
2532         key.objectid = btrfs_ino(inode);
2533         key.type = BTRFS_EXTENT_DATA_KEY;
2534         key.offset = start;
2535
2536         path->leave_spinning = 1;
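             /*
              * If the previously relinked backref ends exactly where this one
              * starts in the same file, try to extend that file extent item in
              * place; if the items turn out not to be mergeable, fall back to
              * inserting a new item below.
              */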
2537         if (merge) {
2538                 struct btrfs_file_extent_item *fi;
2539                 u64 extent_len;
2540                 struct btrfs_key found_key;
2541
2542                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2543                 if (ret < 0)
2544                         goto out_free_path;
2545
2546                 path->slots[0]--;
2547                 leaf = path->nodes[0];
2548                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2549
2550                 fi = btrfs_item_ptr(leaf, path->slots[0],
2551                                     struct btrfs_file_extent_item);
2552                 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
2553
2554                 if (extent_len + found_key.offset == start &&
2555                     relink_is_mergable(leaf, fi, new)) {
2556                         btrfs_set_file_extent_num_bytes(leaf, fi,
2557                                                         extent_len + len);
2558                         btrfs_mark_buffer_dirty(leaf);
2559                         inode_add_bytes(inode, len);
2560
2561                         ret = 1;
2562                         goto out_free_path;
2563                 } else {
2564                         merge = false;
2565                         btrfs_release_path(path);
2566                         goto again;
2567                 }
2568         }
2569
2570         ret = btrfs_insert_empty_item(trans, root, path, &key,
2571                                         sizeof(*extent));
2572         if (ret) {
2573                 btrfs_abort_transaction(trans, root, ret);
2574                 goto out_free_path;
2575         }
2576
2577         leaf = path->nodes[0];
2578         item = btrfs_item_ptr(leaf, path->slots[0],
2579                                 struct btrfs_file_extent_item);
2580         btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr);
2581         btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len);
2582         btrfs_set_file_extent_offset(leaf, item, start - new->file_pos);
2583         btrfs_set_file_extent_num_bytes(leaf, item, len);
2584         btrfs_set_file_extent_ram_bytes(leaf, item, new->len);
2585         btrfs_set_file_extent_generation(leaf, item, trans->transid);
2586         btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
2587         btrfs_set_file_extent_compression(leaf, item, new->compress_type);
2588         btrfs_set_file_extent_encryption(leaf, item, 0);
2589         btrfs_set_file_extent_other_encoding(leaf, item, 0);
2590
2591         btrfs_mark_buffer_dirty(leaf);
2592         inode_add_bytes(inode, len);
2593         btrfs_release_path(path);
2594
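        /*
         * Take a reference on the new extent for this backref's owner so
         * the extent is kept around while the relinked file extent item
         * points at it.
         */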
2595         ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
2596                         new->disk_len, 0,
2597                         backref->root_id, backref->inum,
2598                         new->file_pos, 0);      /* start - extent_offset */
2599         if (ret) {
2600                 btrfs_abort_transaction(trans, root, ret);
2601                 goto out_free_path;
2602         }
2603
2604         ret = 1;
2605 out_free_path:
2606         btrfs_release_path(path);
2607         path->leave_spinning = 0;
2608         btrfs_end_transaction(trans, root);
2609 out_unlock:
2610         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2611                              &cached, GFP_NOFS);
2612         iput(inode);
2613         return ret;
2614 }
2615
2616 static void free_sa_defrag_extent(struct new_sa_defrag_extent *new)
2617 {
2618         struct old_sa_defrag_extent *old, *tmp;
2619
2620         if (!new)
2621                 return;
2622
2623         list_for_each_entry_safe(old, tmp, &new->head, list) {
2624                 kfree(old);
2625         }
2626         kfree(new);
2627 }
2628
2629 static void relink_file_extents(struct new_sa_defrag_extent *new)
2630 {
2631         struct btrfs_path *path;
2632         struct sa_defrag_extent_backref *backref;
2633         struct sa_defrag_extent_backref *prev = NULL;
2634         struct inode *inode;
2635         struct btrfs_root *root;
2636         struct rb_node *node;
2637         int ret;
2638
2639         inode = new->inode;
2640         root = BTRFS_I(inode)->root;
2641
2642         path = btrfs_alloc_path();
2643         if (!path)
2644                 return;
2645
2646         if (!record_extent_backrefs(path, new)) {
2647                 btrfs_free_path(path);
2648                 goto out;
2649         }
2650         btrfs_release_path(path);
2651
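        /*
         * Walk the backrefs in sorted order.  A return value of 1 from
         * relink_extent_backref() means the relink succeeded, so keep the
         * entry around as 'prev' to allow merging with the next backref.
         */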
2652         while (1) {
2653                 node = rb_first(&new->root);
2654                 if (!node)
2655                         break;
2656                 rb_erase(node, &new->root);
2657
2658                 backref = rb_entry(node, struct sa_defrag_extent_backref, node);
2659
2660                 ret = relink_extent_backref(path, prev, backref);
2661                 WARN_ON(ret < 0);
2662
2663                 kfree(prev);
2664
2665                 if (ret == 1)
2666                         prev = backref;
2667                 else
2668                         prev = NULL;
2669                 cond_resched();
2670         }
2671         kfree(prev);
2672
2673         btrfs_free_path(path);
2674 out:
2675         free_sa_defrag_extent(new);
2676
2677         atomic_dec(&root->fs_info->defrag_running);
2678         wake_up(&root->fs_info->transaction_wait);
2679 }
2680
2681 static struct new_sa_defrag_extent *
2682 record_old_file_extents(struct inode *inode,
2683                         struct btrfs_ordered_extent *ordered)
2684 {
2685         struct btrfs_root *root = BTRFS_I(inode)->root;
2686         struct btrfs_path *path;
2687         struct btrfs_key key;
2688         struct old_sa_defrag_extent *old;
2689         struct new_sa_defrag_extent *new;
2690         int ret;
2691
2692         new = kmalloc(sizeof(*new), GFP_NOFS);
2693         if (!new)
2694                 return NULL;
2695
2696         new->inode = inode;
2697         new->file_pos = ordered->file_offset;
2698         new->len = ordered->len;
2699         new->bytenr = ordered->start;
2700         new->disk_len = ordered->disk_len;
2701         new->compress_type = ordered->compress_type;
2702         new->root = RB_ROOT;
2703         INIT_LIST_HEAD(&new->head);
2704
2705         path = btrfs_alloc_path();
2706         if (!path)
2707                 goto out_kfree;
2708
2709         key.objectid = btrfs_ino(inode);
2710         key.type = BTRFS_EXTENT_DATA_KEY;
2711         key.offset = new->file_pos;
2712
2713         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2714         if (ret < 0)
2715                 goto out_free_path;
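        /*
         * ret > 0 means there is no extent item keyed exactly at file_pos;
         * step back one slot so we start from the extent that may still
         * cover the beginning of the range.
         */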
2716         if (ret > 0 && path->slots[0] > 0)
2717                 path->slots[0]--;
2718
2719         /* find out all the old extents for the file range */
2720         while (1) {
2721                 struct btrfs_file_extent_item *extent;
2722                 struct extent_buffer *l;
2723                 int slot;
2724                 u64 num_bytes;
2725                 u64 offset;
2726                 u64 end;
2727                 u64 disk_bytenr;
2728                 u64 extent_offset;
2729
2730                 l = path->nodes[0];
2731                 slot = path->slots[0];
2732
2733                 if (slot >= btrfs_header_nritems(l)) {
2734                         ret = btrfs_next_leaf(root, path);
2735                         if (ret < 0)
2736                                 goto out_free_path;
2737                         else if (ret > 0)
2738                                 break;
2739                         continue;
2740                 }
2741
2742                 btrfs_item_key_to_cpu(l, &key, slot);
2743
2744                 if (key.objectid != btrfs_ino(inode))
2745                         break;
2746                 if (key.type != BTRFS_EXTENT_DATA_KEY)
2747                         break;
2748                 if (key.offset >= new->file_pos + new->len)
2749                         break;
2750
2751                 extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item);
2752
2753                 num_bytes = btrfs_file_extent_num_bytes(l, extent);
2754                 if (key.offset + num_bytes < new->file_pos)
2755                         goto next;
2756
2757                 disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent);
2758                 if (!disk_bytenr)
2759                         goto next;
2760
2761                 extent_offset = btrfs_file_extent_offset(l, extent);
2762
2763                 old = kmalloc(sizeof(*old), GFP_NOFS);
2764                 if (!old)
2765                         goto out_free_path;
2766
2767                 offset = max(new->file_pos, key.offset);
2768                 end = min(new->file_pos + new->len, key.offset + num_bytes);
2769
2770                 old->bytenr = disk_bytenr;
2771                 old->extent_offset = extent_offset;
2772                 old->offset = offset - key.offset;
2773                 old->len = end - offset;
2774                 old->new = new;
2775                 old->count = 0;
2776                 list_add_tail(&old->list, &new->head);
2777 next:
2778                 path->slots[0]++;
2779                 cond_resched();
2780         }
2781
2782         btrfs_free_path(path);
2783         atomic_inc(&root->fs_info->defrag_running);
2784
2785         return new;
2786
2787 out_free_path:
2788         btrfs_free_path(path);
2789 out_kfree:
2790         free_sa_defrag_extent(new);
2791         return NULL;
2792 }
2793
2794 static void btrfs_release_delalloc_bytes(struct btrfs_root *root,
2795                                          u64 start, u64 len)
2796 {
2797         struct btrfs_block_group_cache *cache;
2798
2799         cache = btrfs_lookup_block_group(root->fs_info, start);
2800         ASSERT(cache);
2801
2802         spin_lock(&cache->lock);
2803         cache->delalloc_bytes -= len;
2804         spin_unlock(&cache->lock);
2805
2806         btrfs_put_block_group(cache);
2807 }
2808
2809 /* as ordered data IO finishes, this gets called so we can finish
2810  * an ordered extent if the range of bytes in the file it covers is
2811  * fully written.
2812  */
2813 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
2814 {
2815         struct inode *inode = ordered_extent->inode;
2816         struct btrfs_root *root = BTRFS_I(inode)->root;
2817         struct btrfs_trans_handle *trans = NULL;
2818         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2819         struct extent_state *cached_state = NULL;
2820         struct new_sa_defrag_extent *new = NULL;
2821         int compress_type = 0;
2822         int ret = 0;
2823         u64 logical_len = ordered_extent->len;
2824         bool nolock;
2825         bool truncated = false;
2826
2827         nolock = btrfs_is_free_space_inode(inode);
2828
2829         if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
2830                 ret = -EIO;
2831                 goto out;
2832         }
2833
2834         btrfs_free_io_failure_record(inode, ordered_extent->file_offset,
2835                                      ordered_extent->file_offset +
2836                                      ordered_extent->len - 1);
2837
2838         if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
2839                 truncated = true;
2840                 logical_len = ordered_extent->truncated_len;
2841                 /* Truncated the entire extent, don't bother adding */
2842                 if (!logical_len)
2843                         goto out;
2844         }
2845
2846         if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
2847                 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
2848
2849                 /*
2850                  * For the mwrite (mmap + memset to write) case, we still reserve
2851                  * space for the NOCOW range.
2852                  * As NOCOW won't cause a new delayed ref, just free the reserved space.
2853                  */
2854                 btrfs_qgroup_free_data(inode, ordered_extent->file_offset,
2855                                        ordered_extent->len);
2856                 btrfs_ordered_update_i_size(inode, 0, ordered_extent);
2857                 if (nolock)
2858                         trans = btrfs_join_transaction_nolock(root);
2859                 else
2860                         trans = btrfs_join_transaction(root);
2861                 if (IS_ERR(trans)) {
2862                         ret = PTR_ERR(trans);
2863                         trans = NULL;
2864                         goto out;
2865                 }
2866                 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
2867                 ret = btrfs_update_inode_fallback(trans, root, inode);
2868                 if (ret) /* -ENOMEM or corruption */
2869                         btrfs_abort_transaction(trans, root, ret);
2870                 goto out;
2871         }
2872
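        /*
         * Lock the whole ordered range in the io tree while we insert the
         * file extent item and update the inode, so other users of the
         * range see a consistent view.
         */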
2873         lock_extent_bits(io_tree, ordered_extent->file_offset,
2874                          ordered_extent->file_offset + ordered_extent->len - 1,
2875                          0, &cached_state);
2876
2877         ret = test_range_bit(io_tree, ordered_extent->file_offset,
2878                         ordered_extent->file_offset + ordered_extent->len - 1,
2879                         EXTENT_DEFRAG, 1, cached_state);
2880         if (ret) {
2881                 u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
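                /*
                 * Snapshot-aware defrag is currently disabled (note the
                 * "0 &&" below); the plumbing is kept so it can be
                 * re-enabled later.
                 */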
2882                 if (0 && last_snapshot >= BTRFS_I(inode)->generation)
2883                         /* the inode is shared */
2884                         new = record_old_file_extents(inode, ordered_extent);
2885
2886                 clear_extent_bit(io_tree, ordered_extent->file_offset,
2887                         ordered_extent->file_offset + ordered_extent->len - 1,
2888                         EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS);
2889         }
2890
2891         if (nolock)
2892                 trans = btrfs_join_transaction_nolock(root);
2893         else
2894                 trans = btrfs_join_transaction(root);
2895         if (IS_ERR(trans)) {
2896                 ret = PTR_ERR(trans);
2897                 trans = NULL;
2898                 goto out_unlock;
2899         }
2900
2901         trans->block_rsv = &root->fs_info->delalloc_block_rsv;
2902
2903         if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
2904                 compress_type = ordered_extent->compress_type;
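        /*
         * For a preallocated extent the file extent item already exists,
         * so just mark the written subrange as regular; otherwise insert
         * a new file extent item for the reserved extent.
         */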
2905         if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
2906                 BUG_ON(compress_type);
2907                 ret = btrfs_mark_extent_written(trans, inode,
2908                                                 ordered_extent->file_offset,
2909                                                 ordered_extent->file_offset +
2910                                                 logical_len);
2911         } else {
2912                 BUG_ON(root == root->fs_info->tree_root);
2913                 ret = insert_reserved_file_extent(trans, inode,
2914                                                 ordered_extent->file_offset,
2915                                                 ordered_extent->start,
2916                                                 ordered_extent->disk_len,
2917                                                 logical_len, logical_len,
2918                                                 compress_type, 0, 0,
2919                                                 BTRFS_FILE_EXTENT_REG);
2920                 if (!ret)
2921                         btrfs_release_delalloc_bytes(root,
2922                                                      ordered_extent->start,
2923                                                      ordered_extent->disk_len);
2924         }
2925         unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
2926                            ordered_extent->file_offset, ordered_extent->len,
2927                            trans->transid);
2928         if (ret < 0) {
2929                 btrfs_abort_transaction(trans, root, ret);
2930                 goto out_unlock;
2931         }
2932
2933         add_pending_csums(trans, inode, ordered_extent->file_offset,
2934                           &ordered_extent->list);
2935
2936         btrfs_ordered_update_i_size(inode, 0, ordered_extent);
2937         ret = btrfs_update_inode_fallback(trans, root, inode);
2938         if (ret) { /* -ENOMEM or corruption */
2939                 btrfs_abort_transaction(trans, root, ret);
2940                 goto out_unlock;
2941         }
2942         ret = 0;
2943 out_unlock:
2944         unlock_extent_cached(io_tree, ordered_extent->file_offset,
2945                              ordered_extent->file_offset +
2946                              ordered_extent->len - 1, &cached_state, GFP_NOFS);
2947 out:
2948         if (root != root->fs_info->tree_root)
2949                 btrfs_delalloc_release_metadata(inode, ordered_extent->len);
2950         if (trans)
2951                 btrfs_end_transaction(trans, root);
2952
2953         if (ret || truncated) {
2954                 u64 start, end;
2955
2956                 if (truncated)
2957                         start = ordered_extent->file_offset + logical_len;
2958                 else
2959                         start = ordered_extent->file_offset;
2960                 end = ordered_extent->file_offset + ordered_extent->len - 1;
2961                 clear_extent_uptodate(io_tree, start, end, NULL, GFP_NOFS);
2962
2963                 /* Drop the cache for the part of the extent we didn't write. */
2964                 btrfs_drop_extent_cache(inode, start, end, 0);
2965
2966                 /*
2967                  * If the ordered extent had an IOERR or something else went
2968                  * wrong we need to return the space for this ordered extent
2969                  * back to the allocator.  We only free the extent in the
2970                  * truncated case if we didn't write out the extent at all.
2971                  */
2972                 if ((ret || !logical_len) &&
2973                     !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
2974                     !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
2975                         btrfs_free_reserved_extent(root, ordered_extent->start,
2976                                                    ordered_extent->disk_len, 1);
2977         }
2978
2979
2980         /*
2981          * This needs to be done to make sure anybody waiting knows we are done
2982          * updating everything for this ordered extent.
2983          */
2984         btrfs_remove_ordered_extent(inode, ordered_extent);
2985
2986         /* for snapshot-aware defrag */
2987         if (new) {
2988                 if (ret) {
2989                         free_sa_defrag_extent(new);
2990                         atomic_dec(&root->fs_info->defrag_running);
2991                 } else {
2992                         relink_file_extents(new);
2993                 }
2994         }
2995
2996         /* once for us */
2997         btrfs_put_ordered_extent(ordered_extent);
2998         /* once for the tree */
2999         btrfs_put_ordered_extent(ordered_extent);
3000
3001         return ret;
3002 }
3003
3004 static void finish_ordered_fn(struct btrfs_work *work)
3005 {
3006         struct btrfs_ordered_extent *ordered_extent;
3007         ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
3008         btrfs_finish_ordered_io(ordered_extent);
3009 }
3010
3011 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
3012                                 struct extent_state *state, int uptodate)
3013 {
3014         struct inode *inode = page->mapping->host;
3015         struct btrfs_root *root = BTRFS_I(inode)->root;
3016         struct btrfs_ordered_extent *ordered_extent = NULL;
3017         struct btrfs_workqueue *wq;
3018         btrfs_work_func_t func;
3019
3020         trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
3021
3022         ClearPagePrivate2(page);
3023         if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
3024                                             end - start + 1, uptodate))
3025                 return 0;
3026
3027         if (btrfs_is_free_space_inode(inode)) {
3028                 wq = root->fs_info->endio_freespace_worker;
3029                 func = btrfs_freespace_write_helper;
3030         } else {
3031                 wq = root->fs_info->endio_write_workers;
3032                 func = btrfs_endio_write_helper;
3033         }
3034
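        /*
         * Finishing the ordered extent requires joining a transaction,
         * which we can't do from bio completion context, so hand the
         * work off to the appropriate end-io workqueue.
         */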
3035         btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL,
3036                         NULL);
3037         btrfs_queue_work(wq, &ordered_extent->work);
3038
3039         return 0;
3040 }
3041
3042 static int __readpage_endio_check(struct inode *inode,
3043                                   struct btrfs_io_bio *io_bio,
3044                                   int icsum, struct page *page,
3045                                   int pgoff, u64 start, size_t len)
3046 {
3047         char *kaddr;
3048         u32 csum_expected;
3049         u32 csum = ~(u32)0;
3050
3051         csum_expected = *(((u32 *)io_bio->csum) + icsum);
3052
3053         kaddr = kmap_atomic(page);
3054         csum = btrfs_csum_data(kaddr + pgoff, csum, len);
3055         btrfs_csum_final(csum, (char *)&csum);
3056         if (csum != csum_expected)
3057                 goto zeroit;
3058
3059         kunmap_atomic(kaddr);
3060         return 0;
3061 zeroit:
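        /*
         * Checksum mismatch: overwrite the bad range so stale or corrupt
         * data is never exposed, and let the caller retry from another
         * mirror.
         */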
3062         btrfs_warn_rl(BTRFS_I(inode)->root->fs_info,
3063                 "csum failed ino %llu off %llu csum %u expected csum %u",
3064                            btrfs_ino(inode), start, csum, csum_expected);
3065         memset(kaddr + pgoff, 1, len);
3066         flush_dcache_page(page);
3067         kunmap_atomic(kaddr);
3068         if (csum_expected == 0)
3069                 return 0;
3070         return -EIO;
3071 }
3072
3073 /*
3074  * when reads are done, we need to check csums to verify the data is correct.
3075  * If there's a match, we allow the bio to finish.  If not, the code in
3076  * extent_io.c will try to find good copies for us.
3077  */
3078 static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
3079                                       u64 phy_offset, struct page *page,
3080                                       u64 start, u64 end, int mirror)
3081 {
3082         size_t offset = start - page_offset(page);
3083         struct inode *inode = page->mapping->host;
3084         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3085         struct btrfs_root *root = BTRFS_I(inode)->root;
3086
3087         if (PageChecked(page)) {
3088                 ClearPageChecked(page);
3089                 return 0;
3090         }
3091
3092         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
3093                 return 0;
3094
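        /*
         * The data reloc tree copies extents from other inodes; ranges
         * flagged EXTENT_NODATASUM came from nodatasum files and have no
         * checksums to verify, so clear the flag and skip the check.
         */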
3095         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
3096             test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
3097                 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
3098                                   GFP_NOFS);
3099                 return 0;
3100         }
3101
3102         phy_offset >>= inode->i_sb->s_blocksize_bits;
3103         return __readpage_endio_check(inode, io_bio, phy_offset, page, offset,
3104                                       start, (size_t)(end - start + 1));
3105 }
3106
3107 struct delayed_iput {
3108         struct list_head list;
3109         struct inode *inode;
3110 };
3111
3112 /* JDM: If this is fs-wide, why can't we add a pointer to
3113  * btrfs_inode instead and avoid the allocation? */
3114 void btrfs_add_delayed_iput(struct inode *inode)
3115 {
3116         struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
3117         struct delayed_iput *delayed;
3118
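        /* Only the final reference needs a deferred iput; otherwise just drop it. */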
3119         if (atomic_add_unless(&inode->i_count, -1, 1))
3120                 return;
3121
3122         delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
3123         delayed->inode = inode;
3124
3125         spin_lock(&fs_info->delayed_iput_lock);
3126         list_add_tail(&delayed->list, &fs_info->delayed_iputs);
3127         spin_unlock(&fs_info->delayed_iput_lock);
3128 }
3129
3130 void btrfs_run_delayed_iputs(struct btrfs_root *root)
3131 {
3132         LIST_HEAD(list);
3133         struct btrfs_fs_info *fs_info = root->fs_info;
3134         struct delayed_iput *delayed;
3135         int empty;
3136
3137         spin_lock(&fs_info->delayed_iput_lock);
3138         empty = list_empty(&fs_info->delayed_iputs);
3139         spin_unlock(&fs_info->delayed_iput_lock);
3140         if (empty)
3141                 return;
3142
3143         down_read(&fs_info->delayed_iput_sem);
3144
3145         spin_lock(&fs_info->delayed_iput_lock);
3146         list_splice_init(&fs_info->delayed_iputs, &list);
3147         spin_unlock(&fs_info->delayed_iput_lock);
3148
3149         while (!list_empty(&list)) {
3150                 delayed = list_entry(list.next, struct delayed_iput, list);
3151                 list_del(&delayed->list);
3152                 iput(delayed->inode);
3153                 kfree(delayed);
3154         }
3155
3156         up_read(&root->fs_info->delayed_iput_sem);
3157 }
3158
3159 /*
3160  * This is called at transaction commit time. If there are no orphan
3161  * files in the subvolume, it removes the orphan item and frees the
3162  * block_rsv structure.
3163  */
3164 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
3165                               struct btrfs_root *root)
3166 {
3167         struct btrfs_block_rsv *block_rsv;
3168         int ret;
3169
3170         if (atomic_read(&root->orphan_inodes) ||
3171             root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
3172                 return;
3173
3174         spin_lock(&root->orphan_lock);
3175         if (atomic_read(&root->orphan_inodes)) {
3176                 spin_unlock(&root->orphan_lock);
3177                 return;
3178         }
3179
3180         if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) {
3181                 spin_unlock(&root->orphan_lock);
3182                 return;
3183         }
3184
3185         block_rsv = root->orphan_block_rsv;
3186         root->orphan_block_rsv = NULL;
3187         spin_unlock(&root->orphan_lock);
3188
3189         if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state) &&
3190             btrfs_root_refs(&root->root_item) > 0) {
3191                 ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
3192                                             root->root_key.objectid);
3193                 if (ret)
3194                         btrfs_abort_transaction(trans, root, ret);
3195                 else
3196                         clear_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
3197                                   &root->state);
3198         }
3199
3200         if (block_rsv) {
3201                 WARN_ON(block_rsv->size > 0);
3202                 btrfs_free_block_rsv(root, block_rsv);
3203         }
3204 }
3205
3206 /*
3207  * This creates an orphan entry for the given inode in case something goes
3208  * wrong in the middle of an unlink/truncate.
3209  *
3210  * NOTE: the caller of this function should reserve 5 units of metadata for
3211  *       this function.
3212  */
3213 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
3214 {
3215         struct btrfs_root *root = BTRFS_I(inode)->root;
3216         struct btrfs_block_rsv *block_rsv = NULL;
3217         int reserve = 0;
3218         int insert = 0;
3219         int ret;
3220
3221         if (!root->orphan_block_rsv) {
3222                 block_rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
3223                 if (!block_rsv)
3224                         return -ENOMEM;
3225         }
3226
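        /*
         * Install our freshly allocated block_rsv under the orphan lock; if
         * another task beat us to it, free the one we allocated above.
         */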
3227         spin_lock(&root->orphan_lock);
3228         if (!root->orphan_block_rsv) {
3229                 root->orphan_block_rsv = block_rsv;
3230         } else if (block_rsv) {
3231                 btrfs_free_block_rsv(root, block_rsv);
3232                 block_rsv = NULL;
3233         }
3234
3235         if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3236                               &BTRFS_I(inode)->runtime_flags)) {
3237 #if 0
3238                 /*
3239                  * For proper ENOSPC handling, we should do orphan
3240                  * cleanup when mounting. But this introduces backward
3241                  * compatibility issue.
3242                  */
3243                 if (!xchg(&root->orphan_item_inserted, 1))
3244                         insert = 2;
3245                 else
3246                         insert = 1;
3247 #endif
3248                 insert = 1;
3249                 atomic_inc(&root->orphan_inodes);
3250         }
3251
3252         if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3253                               &BTRFS_I(inode)->runtime_flags))
3254                 reserve = 1;
3255         spin_unlock(&root->orphan_lock);
3256
3257         /* grab metadata reservation from transaction handle */
3258         if (reserve) {
3259                 ret = btrfs_orphan_reserve_metadata(trans, inode);
3260                 BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
3261         }
3262
3263         /* insert an orphan item to track this unlinked/truncated file */
3264         if (insert >= 1) {
3265                 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
3266                 if (ret) {
3267                         atomic_dec(&root->orphan_inodes);
3268                         if (reserve) {
3269                                 clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3270                                           &BTRFS_I(inode)->runtime_flags);
3271                                 btrfs_orphan_release_metadata(inode);
3272                         }
3273                         if (ret != -EEXIST) {
3274                                 clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3275                                           &BTRFS_I(inode)->runtime_flags);
3276                                 btrfs_abort_transaction(trans, root, ret);
3277                                 return ret;
3278                         }
3279                 }
3280                 ret = 0;
3281         }
3282
3283         /* insert an orphan item to track that the subvolume contains orphan files */
3284         if (insert >= 2) {
3285                 ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
3286                                                root->root_key.objectid);
3287                 if (ret && ret != -EEXIST) {
3288                         btrfs_abort_transaction(trans, root, ret);
3289                         return ret;
3290                 }
3291         }
3292         return 0;
3293 }
3294
3295 /*
3296  * We have done the truncate/delete so we can go ahead and remove the orphan
3297  * item for this particular inode.
3298  */
3299 static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3300                             struct inode *inode)
3301 {
3302         struct btrfs_root *root = BTRFS_I(inode)->root;
3303         int delete_item = 0;
3304         int release_rsv = 0;
3305         int ret = 0;
3306
3307         spin_lock(&root->orphan_lock);
3308         if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3309                                &BTRFS_I(inode)->runtime_flags))
3310                 delete_item = 1;
3311
3312         if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3313                                &BTRFS_I(inode)->runtime_flags))
3314                 release_rsv = 1;
3315         spin_unlock(&root->orphan_lock);
3316
3317         if (delete_item) {
3318                 atomic_dec(&root->orphan_inodes);
3319                 if (trans)
3320                         ret = btrfs_del_orphan_item(trans, root,
3321                                                     btrfs_ino(inode));
3322         }
3323
3324         if (release_rsv)
3325                 btrfs_orphan_release_metadata(inode);
3326
3327         return ret;
3328 }
3329
3330 /*
3331  * this cleans up any orphans that may be left on the list from the last use
3332  * of this root.
3333  */
3334 int btrfs_orphan_cleanup(struct btrfs_root *root)
3335 {
3336         struct btrfs_path *path;
3337         struct extent_buffer *leaf;
3338         struct btrfs_key key, found_key;
3339         struct btrfs_trans_handle *trans;
3340         struct inode *inode;
3341         u64 last_objectid = 0;
3342         int ret = 0, nr_unlink = 0, nr_truncate = 0;
3343
3344         if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
3345                 return 0;
3346
3347         path = btrfs_alloc_path();
3348         if (!path) {
3349                 ret = -ENOMEM;
3350                 goto out;
3351         }
3352         path->reada = -1;
3353
3354         key.objectid = BTRFS_ORPHAN_OBJECTID;
3355         key.type = BTRFS_ORPHAN_ITEM_KEY;
3356         key.offset = (u64)-1;
3357
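        /*
         * Orphan items are keyed as (BTRFS_ORPHAN_OBJECTID,
         * BTRFS_ORPHAN_ITEM_KEY, inode number); searching from offset
         * (u64)-1 finds the orphan with the highest inode number first,
         * and each one is processed and removed before the next search.
         */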
3358         while (1) {
3359                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3360                 if (ret < 0)
3361                         goto out;
3362
3363                 /*
3364                  * ret == 0 means we found what we were searching for, which
3365                  * is weird, but possible, so only adjust the path if we didn't
3366                  * find the key exactly and see if we have entries that match
3367                  */
3368                 if (ret > 0) {
3369                         ret = 0;
3370                         if (path->slots[0] == 0)
3371                                 break;
3372                         path->slots[0]--;
3373                 }
3374
3375                 /* pull out the item */
3376                 leaf = path->nodes[0];
3377                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3378
3379                 /* make sure the item matches what we want */
3380                 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3381                         break;
3382                 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
3383                         break;
3384
3385                 /* release the path since we're done with it */
3386                 btrfs_release_path(path);
3387
3388                 /*
3389                  * this is basically a btrfs_lookup, without the
3390                  * root-crossing part.  We store the inode number in the
3391                  * offset of the orphan item.
3392                  */
3393
3394                 if (found_key.offset == last_objectid) {
3395                         btrfs_err(root->fs_info,
3396                                 "Error removing orphan entry, stopping orphan cleanup");
3397                         ret = -EINVAL;
3398                         goto out;
3399                 }
3400
3401                 last_objectid = found_key.offset;
3402
3403                 found_key.objectid = found_key.offset;
3404                 found_key.type = BTRFS_INODE_ITEM_KEY;
3405                 found_key.offset = 0;
3406                 inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
3407                 ret = PTR_ERR_OR_ZERO(inode);
3408                 if (ret && ret != -ESTALE)
3409                         goto out;
3410
3411                 if (ret == -ESTALE && root == root->fs_info->tree_root) {
3412                         struct btrfs_root *dead_root;
3413                         struct btrfs_fs_info *fs_info = root->fs_info;
3414                         int is_dead_root = 0;
3415
3416                         /*
3417                          * this is an orphan in the tree root. Currently these
3418                          * could come from 2 sources:
3419                          *  a) a snapshot deletion in progress
3420                          *  b) a free space cache inode
3421                          * We need to distinguish those two, as the snapshot
3422                          * orphan must not get deleted.
3423                          * find_dead_roots already ran before us, so if this
3424                          * is a snapshot deletion, we should find the root
3425                          * in the dead_roots list
3426                          */
3427                         spin_lock(&fs_info->trans_lock);
3428                         list_for_each_entry(dead_root, &fs_info->dead_roots,
3429                                             root_list) {
3430                                 if (dead_root->root_key.objectid ==
3431                                     found_key.objectid) {
3432                                         is_dead_root = 1;
3433                                         break;
3434                                 }
3435                         }
3436                         spin_unlock(&fs_info->trans_lock);
3437                         if (is_dead_root) {
3438                                 /* prevent this orphan from being found again */
3439                                 key.offset = found_key.objectid - 1;
3440                                 continue;
3441                         }
3442                 }
3443                 /*
3444                  * Inode is already gone but the orphan item is still there,
3445                  * kill the orphan item.
3446                  */
3447                 if (ret == -ESTALE) {
3448                         trans = btrfs_start_transaction(root, 1);
3449                         if (IS_ERR(trans)) {
3450                                 ret = PTR_ERR(trans);
3451                                 goto out;
3452                         }
3453                         btrfs_debug(root->fs_info, "auto deleting %Lu",
3454                                 found_key.objectid);
3455                         ret = btrfs_del_orphan_item(trans, root,
3456                                                     found_key.objectid);
3457                         btrfs_end_transaction(trans, root);
3458                         if (ret)
3459                                 goto out;
3460                         continue;
3461                 }
3462
3463                 /*
3464                  * add this inode to the orphan list so btrfs_orphan_del does
3465                  * the proper thing when we hit it
3466                  */
3467                 set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3468                         &BTRFS_I(inode)->runtime_flags);
3469                 atomic_inc(&root->orphan_inodes);
3470
3471                 /* if we have links, this was a truncate, let's do that */
3472                 if (inode->i_nlink) {
3473                         if (WARN_ON(!S_ISREG(inode->i_mode))) {
3474                                 iput(inode);
3475                                 continue;
3476                         }
3477                         nr_truncate++;
3478
3479                         /* 1 for the orphan item deletion. */
3480                         trans = btrfs_start_transaction(root, 1);
3481                         if (IS_ERR(trans)) {
3482                                 iput(inode);
3483                                 ret = PTR_ERR(trans);
3484                                 goto out;
3485                         }
3486                         ret = btrfs_orphan_add(trans, inode);
3487                         btrfs_end_transaction(trans, root);
3488                         if (ret) {
3489                                 iput(inode);
3490                                 goto out;
3491                         }
3492
3493                         ret = btrfs_truncate(inode);
3494                         if (ret)
3495                                 btrfs_orphan_del(NULL, inode);
3496                 } else {
3497                         nr_unlink++;
3498                 }
3499
3500                 /* this will do delete_inode and everything for us */
3501                 iput(inode);
3502                 if (ret)
3503                         goto out;
3504         }
3505         /* release the path since we're done with it */
3506         btrfs_release_path(path);
3507
3508         root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
3509
3510         if (root->orphan_block_rsv)
3511                 btrfs_block_rsv_release(root, root->orphan_block_rsv,
3512                                         (u64)-1);
3513
3514         if (root->orphan_block_rsv ||
3515             test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
3516                 trans = btrfs_join_transaction(root);
3517                 if (!IS_ERR(trans))
3518                         btrfs_end_transaction(trans, root);
3519         }
3520
3521         if (nr_unlink)
3522                 btrfs_debug(root->fs_info, "unlinked %d orphans", nr_unlink);
3523         if (nr_truncate)
3524                 btrfs_debug(root->fs_info, "truncated %d orphans", nr_truncate);
3525
3526 out:
3527         if (ret)
3528                 btrfs_err(root->fs_info,
3529                         "could not do orphan cleanup %d", ret);
3530         btrfs_free_path(path);
3531         return ret;
3532 }
3533
3534 /*
3535  * very simple check to peek ahead in the leaf looking for xattrs.  If we
3536  * don't find any xattrs, we know there can't be any acls.
3537  *
3538  * slot is the slot the inode is in, objectid is the objectid of the inode
3539  */
3540 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3541                                           int slot, u64 objectid,
3542                                           int *first_xattr_slot)
3543 {
3544         u32 nritems = btrfs_header_nritems(leaf);
3545         struct btrfs_key found_key;
3546         static u64 xattr_access = 0;
3547         static u64 xattr_default = 0;
3548         int scanned = 0;
3549
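        /*
         * The name hashes of the two POSIX ACL xattrs never change, so
         * compute them once and cache them in the function-local statics.
         */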
3550         if (!xattr_access) {
3551                 xattr_access = btrfs_name_hash(POSIX_ACL_XATTR_ACCESS,
3552                                         strlen(POSIX_ACL_XATTR_ACCESS));
3553                 xattr_default = btrfs_name_hash(POSIX_ACL_XATTR_DEFAULT,
3554                                         strlen(POSIX_ACL_XATTR_DEFAULT));
3555         }
3556
3557         slot++;
3558         *first_xattr_slot = -1;
3559         while (slot < nritems) {
3560                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3561
3562                 /* we found a different objectid, there must not be acls */
3563                 if (found_key.objectid != objectid)
3564                         return 0;
3565
3566                 /* we found an xattr, assume we've got an acl */
3567                 if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
3568                         if (*first_xattr_slot == -1)
3569                                 *first_xattr_slot = slot;
3570                         if (found_key.offset == xattr_access ||
3571                             found_key.offset == xattr_default)
3572                                 return 1;
3573                 }
3574
3575                 /*
3576                  * we found a key greater than an xattr key, there can't
3577                  * be any acls later on
3578                  */
3579                 if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3580                         return 0;
3581
3582                 slot++;
3583                 scanned++;
3584
3585                 /*
3586                  * it goes inode, inode backrefs, xattrs, extents,
3587                  * so if there are a ton of hard links to an inode there can
3588                  * be a lot of backrefs.  Don't waste time searching too hard,
3589                  * this is just an optimization
3590                  */
3591                 if (scanned >= 8)
3592                         break;
3593         }
3594         /* we hit the end of the leaf before we found an xattr or
3595          * something larger than an xattr.  We have to assume the inode
3596          * has acls
3597          */
3598         if (*first_xattr_slot == -1)
3599                 *first_xattr_slot = slot;
3600         return 1;
3601 }
3602
3603 /*
3604  * read an inode from the btree into the in-memory inode
3605  */
3606 static void btrfs_read_locked_inode(struct inode *inode)
3607 {
3608         struct btrfs_path *path;
3609         struct extent_buffer *leaf;
3610         struct btrfs_inode_item *inode_item;
3611         struct btrfs_root *root = BTRFS_I(inode)->root;
3612         struct btrfs_key location;
3613         unsigned long ptr;
3614         int maybe_acls;
3615         u32 rdev;
3616         int ret;
3617         bool filled = false;
3618         int first_xattr_slot;
3619
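        /*
         * Try filling the inode from its delayed (in-memory) inode item
         * first; if that works we can skip copying most fields from the
         * on-disk inode item below.
         */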
3620         ret = btrfs_fill_inode(inode, &rdev);
3621         if (!ret)
3622                 filled = true;
3623
3624         path = btrfs_alloc_path();
3625         if (!path)
3626                 goto make_bad;
3627
3628         memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
3629
3630         ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
3631         if (ret)
3632                 goto make_bad;
3633
3634         leaf = path->nodes[0];
3635
3636         if (filled)
3637                 goto cache_index;
3638
3639         inode_item = btrfs_item_ptr(leaf, path->slots[0],
3640                                     struct btrfs_inode_item);
3641         inode->i_mode = btrfs_inode_mode(leaf, inode_item);
3642         set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
3643         i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
3644         i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
3645         btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
3646
3647         inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
3648         inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);
3649
3650         inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
3651         inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);
3652
3653         inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime);
3654         inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);
3655
3656         BTRFS_I(inode)->i_otime.tv_sec =
3657                 btrfs_timespec_sec(leaf, &inode_item->otime);
3658         BTRFS_I(inode)->i_otime.tv_nsec =
3659                 btrfs_timespec_nsec(leaf, &inode_item->otime);
3660
3661         inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
3662         BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
3663         BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3664
3665         inode->i_version = btrfs_inode_sequence(leaf, inode_item);
3666         inode->i_generation = BTRFS_I(inode)->generation;
3667         inode->i_rdev = 0;
3668         rdev = btrfs_inode_rdev(leaf, inode_item);
3669
3670         BTRFS_I(inode)->index_cnt = (u64)-1;
3671         BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
3672
3673 cache_index:
3674         /*
3675          * If we were modified in the current generation and evicted from memory
3676          * and then re-read we need to do a full sync since we don't have any
3677          * idea about which extents were modified before we were evicted from
3678          * cache.
3679          *
3680          * This is required for both inode re-read from disk and delayed inode
3681          * in delayed_nodes_tree.
3682          */
3683         if (BTRFS_I(inode)->last_trans == root->fs_info->generation)
3684                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3685                         &BTRFS_I(inode)->runtime_flags);
3686
3687         /*
3688          * We don't persist the id of the transaction where an unlink operation
3689          * against the inode was last made. So here we assume the inode might
3690          * have been evicted, and therefore the exact value of last_unlink_trans
3691          * lost, and set it to last_trans to avoid metadata inconsistencies
3692          * between the inode and its parent if the inode is fsync'ed and the log
3693          * replayed. For example, in the scenario:
3694          *
3695          * touch mydir/foo
3696          * ln mydir/foo mydir/bar
3697          * sync
3698          * unlink mydir/bar
3699          * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
3700          * xfs_io -c fsync mydir/foo
3701          * <power failure>
3702          * mount fs, triggers fsync log replay
3703          *
3704          * We must make sure that when we fsync our inode foo we also log its
3705          * parent inode, otherwise after log replay the parent still has the
3706          * dentry with the "bar" name but our inode foo has a link count of 1
3707          * and doesn't have an inode ref with the name "bar" anymore.
3708          *
3709          * Setting last_unlink_trans to last_trans is a pessimistic approach,
3710          * but it guarantees correctness at the expense of occasional full
3711          * transaction commits on fsync if our inode is a directory, or if our
3712          * inode is not a directory, logging its parent unnecessarily.
3713          */
3714         BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
3715
3716         path->slots[0]++;
3717         if (inode->i_nlink != 1 ||
3718             path->slots[0] >= btrfs_header_nritems(leaf))
3719                 goto cache_acl;
3720
3721         btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
3722         if (location.objectid != btrfs_ino(inode))
3723                 goto cache_acl;
3724
3725         ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3726         if (location.type == BTRFS_INODE_REF_KEY) {
3727                 struct btrfs_inode_ref *ref;
3728
3729                 ref = (struct btrfs_inode_ref *)ptr;
3730                 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
3731         } else if (location.type == BTRFS_INODE_EXTREF_KEY) {
3732                 struct btrfs_inode_extref *extref;
3733
3734                 extref = (struct btrfs_inode_extref *)ptr;
3735                 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
3736                                                                      extref);
3737         }
3738 cache_acl:
3739         /*
3740          * try to precache a NULL acl entry for files that don't have
3741          * any xattrs or acls
3742          */
3743         maybe_acls = acls_after_inode_item(leaf, path->slots[0],
3744                                            btrfs_ino(inode), &first_xattr_slot);
3745         if (first_xattr_slot != -1) {
3746                 path->slots[0] = first_xattr_slot;
3747                 ret = btrfs_load_inode_props(inode, path);
3748                 if (ret)
3749                         btrfs_err(root->fs_info,
3750                                   "error loading props for ino %llu (root %llu): %d",
3751                                   btrfs_ino(inode),
3752                                   root->root_key.objectid, ret);
3753         }
3754         btrfs_free_path(path);
3755
3756         if (!maybe_acls)
3757                 cache_no_acl(inode);
3758
3759         switch (inode->i_mode & S_IFMT) {
3760         case S_IFREG:
3761                 inode->i_mapping->a_ops = &btrfs_aops;
3762                 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3763                 inode->i_fop = &btrfs_file_operations;
3764                 inode->i_op = &btrfs_file_inode_operations;
3765                 break;
3766         case S_IFDIR:
3767                 inode->i_fop = &btrfs_dir_file_operations;
3768                 if (root == root->fs_info->tree_root)
3769                         inode->i_op = &btrfs_dir_ro_inode_operations;
3770                 else
3771                         inode->i_op = &btrfs_dir_inode_operations;
3772                 break;
3773         case S_IFLNK:
3774                 inode->i_op = &btrfs_symlink_inode_operations;
3775                 inode->i_mapping->a_ops = &btrfs_symlink_aops;
3776                 break;
3777         default:
3778                 inode->i_op = &btrfs_special_inode_operations;
3779                 init_special_inode(inode, inode->i_mode, rdev);
3780                 break;
3781         }
3782
3783         btrfs_update_iflags(inode);
3784         return;
3785
3786 make_bad:
3787         btrfs_free_path(path);
3788         make_bad_inode(inode);
3789 }
3790
3791 /*
3792  * given a leaf and an inode, copy the inode fields into the leaf
3793  */
3794 static void fill_inode_item(struct btrfs_trans_handle *trans,
3795                             struct extent_buffer *leaf,
3796                             struct btrfs_inode_item *item,
3797                             struct inode *inode)
3798 {
3799         struct btrfs_map_token token;
3800
3801         btrfs_init_map_token(&token);
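        /*
         * The map token caches the mapped area of the leaf so the long run
         * of set_token_* calls below avoids re-mapping it for every field.
         */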
3802
3803         btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3804         btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3805         btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size,
3806                                    &token);
3807         btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3808         btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3809
3810         btrfs_set_token_timespec_sec(leaf, &item->atime,
3811                                      inode->i_atime.tv_sec, &token);
3812         btrfs_set_token_timespec_nsec(leaf, &item->atime,
3813                                       inode->i_atime.tv_nsec, &token);
3814
3815         btrfs_set_token_timespec_sec(leaf, &item->mtime,
3816                                      inode->i_mtime.tv_sec, &token);
3817         btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3818                                       inode->i_mtime.tv_nsec, &token);
3819
3820         btrfs_set_token_timespec_sec(leaf, &item->ctime,
3821                                      inode->i_ctime.tv_sec, &token);
3822         btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3823                                       inode->i_ctime.tv_nsec, &token);
3824
3825         btrfs_set_token_timespec_sec(leaf, &item->otime,
3826                                      BTRFS_I(inode)->i_otime.tv_sec, &token);
3827         btrfs_set_token_timespec_nsec(leaf, &item->otime,
3828                                       BTRFS_I(inode)->i_otime.tv_nsec, &token);
3829
3830         btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3831                                      &token);
3832         btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
3833                                          &token);
3834         btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
3835         btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3836         btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3837         btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3838         btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3839 }
3840
3841 /*
3842  * copy everything in the in-memory inode into the btree.
3843  */
3844 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
3845                                 struct btrfs_root *root, struct inode *inode)
3846 {
3847         struct btrfs_inode_item *inode_item;
3848         struct btrfs_path *path;
3849         struct extent_buffer *leaf;
3850         int ret;
3851
3852         path = btrfs_alloc_path();
3853         if (!path)
3854                 return -ENOMEM;
3855
3856         path->leave_spinning = 1;
3857         ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
3858                                  1);
3859         if (ret) {
3860                 if (ret > 0)
3861                         ret = -ENOENT;
3862                 goto failed;
3863         }
3864
3865         leaf = path->nodes[0];
3866         inode_item = btrfs_item_ptr(leaf, path->slots[0],
3867                                     struct btrfs_inode_item);
3868
3869         fill_inode_item(trans, leaf, inode_item, inode);
3870         btrfs_mark_buffer_dirty(leaf);
3871         btrfs_set_inode_last_trans(trans, inode);
3872         ret = 0;
3873 failed:
3874         btrfs_free_path(path);
3875         return ret;
3876 }
3877
3878 /*
3879  * copy everything in the in-memory inode into the btree.
3880  */
3881 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
3882                                 struct btrfs_root *root, struct inode *inode)
3883 {
3884         int ret;
3885
3886         /*
3887          * If the inode is a free space inode, we can deadlock during commit
3888          * if we put it into the delayed code.
3889          *
3890          * The data relocation inode should also be directly updated
3891          * without delay
3892          */
3893         if (!btrfs_is_free_space_inode(inode)
3894             && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
3895             && !root->fs_info->log_root_recovering) {
3896                 btrfs_update_root_times(trans, root);
3897
3898                 ret = btrfs_delayed_update_inode(trans, root, inode);
3899                 if (!ret)
3900                         btrfs_set_inode_last_trans(trans, inode);
3901                 return ret;
3902         }
3903
3904         return btrfs_update_inode_item(trans, root, inode);
3905 }
3906
3907 noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
3908                                          struct btrfs_root *root,
3909                                          struct inode *inode)
3910 {
3911         int ret;
3912
3913         ret = btrfs_update_inode(trans, root, inode);
3914         if (ret == -ENOSPC)
3915                 return btrfs_update_inode_item(trans, root, inode);
3916         return ret;
3917 }
3918
3919 /*
3920  * unlink helper that gets used here in inode.c and in the tree logging
3921  * recovery code.  It removes a link in a directory with a given name, and
3922  * also drops the back refs in the inode to the directory.
3923  */
3924 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3925                                 struct btrfs_root *root,
3926                                 struct inode *dir, struct inode *inode,
3927                                 const char *name, int name_len)
3928 {
3929         struct btrfs_path *path;
3930         int ret = 0;
3931         struct extent_buffer *leaf;
3932         struct btrfs_dir_item *di;
3933         struct btrfs_key key;
3934         u64 index;
3935         u64 ino = btrfs_ino(inode);
3936         u64 dir_ino = btrfs_ino(dir);
3937
3938         path = btrfs_alloc_path();
3939         if (!path) {
3940                 ret = -ENOMEM;
3941                 goto out;
3942         }
3943
3944         path->leave_spinning = 1;
3945         di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3946                                     name, name_len, -1);
3947         if (IS_ERR(di)) {
3948                 ret = PTR_ERR(di);
3949                 goto err;
3950         }
3951         if (!di) {
3952                 ret = -ENOENT;
3953                 goto err;
3954         }
3955         leaf = path->nodes[0];
3956         btrfs_dir_item_key_to_cpu(leaf, di, &key);
3957         ret = btrfs_delete_one_dir_name(trans, root, path, di);
3958         if (ret)
3959                 goto err;
3960         btrfs_release_path(path);
3961
3962         /*
3963          * If we don't have the dir index cached, we have to get it by
3964          * looking up the inode ref; and since we had to find the inode
3965          * ref anyway, remove it directly - there is no point in a
3966          * delayed deletion in that case.
3967          *
3968          * But if we do have the dir index, there is no need to look up
3969          * the inode ref.  Since the inode ref sits close to the inode item,
3970          * it is better to delay its deletion until we update the inode item.
3971          */
3972         if (BTRFS_I(inode)->dir_index) {
3973                 ret = btrfs_delayed_delete_inode_ref(inode);
3974                 if (!ret) {
3975                         index = BTRFS_I(inode)->dir_index;
3976                         goto skip_backref;
3977                 }
3978         }
3979
3980         ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
3981                                   dir_ino, &index);
3982         if (ret) {
3983                 btrfs_info(root->fs_info,
3984                         "failed to delete reference to %.*s, inode %llu parent %llu",
3985                         name_len, name, ino, dir_ino);
3986                 btrfs_abort_transaction(trans, root, ret);
3987                 goto err;
3988         }
3989 skip_backref:
3990         ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
3991         if (ret) {
3992                 btrfs_abort_transaction(trans, root, ret);
3993                 goto err;
3994         }
3995
3996         ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
3997                                          inode, dir_ino);
3998         if (ret != 0 && ret != -ENOENT) {
3999                 btrfs_abort_transaction(trans, root, ret);
4000                 goto err;
4001         }
4002
4003         ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
4004                                            dir, index);
4005         if (ret == -ENOENT)
4006                 ret = 0;
4007         else if (ret)
4008                 btrfs_abort_transaction(trans, root, ret);
4009 err:
4010         btrfs_free_path(path);
4011         if (ret)
4012                 goto out;
4013
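        /*
         * Directory i_size counts each name twice - once for the dir item
         * and once for the dir index item - so shrink it by name_len * 2
         * now that both entries are gone.
         */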
4014         btrfs_i_size_write(dir, dir->i_size - name_len * 2);
4015         inode_inc_iversion(inode);
4016         inode_inc_iversion(dir);
4017         inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
4018         ret = btrfs_update_inode(trans, root, dir);
4019 out:
4020         return ret;
4021 }
4022
4023 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4024                        struct btrfs_root *root,
4025                        struct inode *dir, struct inode *inode,
4026                        const char *name, int name_len)
4027 {
4028         int ret;
4029         ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
4030         if (!ret) {
4031                 drop_nlink(inode);
4032                 ret = btrfs_update_inode(trans, root, inode);
4033         }
4034         return ret;
4035 }
4036
4037 /*
4038  * helper to start transaction for unlink and rmdir.
4039  *
4040  * unlink and rmdir are special in btrfs: they do not always free space, so
4041  * if we cannot make our reservations the normal way, see if there is enough
4042  * slack room in the global reserve to migrate from; otherwise we cannot
4043  * allow the unlink to occur.
4044  */
4045 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
4046 {
4047         struct btrfs_trans_handle *trans;
4048         struct btrfs_root *root = BTRFS_I(dir)->root;
4049         int ret;
4050
4051         /*
4052          * 1 for the possible orphan item
4053          * 1 for the dir item
4054          * 1 for the dir index
4055          * 1 for the inode ref
4056          * 1 for the inode
4057          */
4058         trans = btrfs_start_transaction(root, 5);
4059         if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
4060                 return trans;
4061
4062         if (PTR_ERR(trans) == -ENOSPC) {
4063                 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
4064
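                /*
                 * Retry with no reservation and ask btrfs_cond_migrate_bytes()
                 * to move the bytes we need into the transaction block
                 * reserve; if even that fails, there is no room for the
                 * unlink and we bail out.
                 */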
4065                 trans = btrfs_start_transaction(root, 0);
4066                 if (IS_ERR(trans))
4067                         return trans;
4068                 ret = btrfs_cond_migrate_bytes(root->fs_info,
4069                                                &root->fs_info->trans_block_rsv,
4070                                                num_bytes, 5);
4071                 if (ret) {
4072                         btrfs_end_transaction(trans, root);
4073                         return ERR_PTR(ret);
4074                 }
4075                 trans->block_rsv = &root->fs_info->trans_block_rsv;
4076                 trans->bytes_reserved = num_bytes;
4077         }
4078         return trans;
4079 }
4080
4081 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
4082 {
4083         struct btrfs_root *root = BTRFS_I(dir)->root;
4084         struct btrfs_trans_handle *trans;
4085         struct inode *inode = d_inode(dentry);
4086         int ret;
4087
4088         trans = __unlink_start_trans(dir);
4089         if (IS_ERR(trans))
4090                 return PTR_ERR(trans);
4091
4092         btrfs_record_unlink_dir(trans, dir, d_inode(dentry), 0);
4093
4094         ret = btrfs_unlink_inode(trans, root, dir, d_inode(dentry),
4095                                  dentry->d_name.name, dentry->d_name.len);
4096         if (ret)
4097                 goto out;
4098
4099         if (inode->i_nlink == 0) {
4100                 ret = btrfs_orphan_add(trans, inode);
4101                 if (ret)
4102                         goto out;
4103         }
4104
4105 out:
4106         btrfs_end_transaction(trans, root);
4107         btrfs_btree_balance_dirty(root);
4108         return ret;
4109 }
4110
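/*
 * Remove a directory entry that points at a subvolume or snapshot root
 * (the dir item references a BTRFS_ROOT_ITEM_KEY key instead of an inode):
 * delete the dir item, drop the root ref in the tree root to get the dir
 * index (falling back to a search by name if the ref is already gone),
 * delete the delayed dir index item and update the parent directory.
 */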
4111 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
4112                         struct btrfs_root *root,
4113                         struct inode *dir, u64 objectid,
4114                         const char *name, int name_len)
4115 {
4116         struct btrfs_path *path;
4117         struct extent_buffer *leaf;
4118         struct btrfs_dir_item *di;
4119         struct btrfs_key key;
4120         u64 index;
4121         int ret;
4122         u64 dir_ino = btrfs_ino(dir);
4123
4124         path = btrfs_alloc_path();
4125         if (!path)
4126                 return -ENOMEM;
4127
4128         di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4129                                    name, name_len, -1);
4130         if (IS_ERR_OR_NULL(di)) {
4131                 if (!di)
4132                         ret = -ENOENT;
4133                 else
4134                         ret = PTR_ERR(di);
4135                 goto out;
4136         }
4137
4138         leaf = path->nodes[0];
4139         btrfs_dir_item_key_to_cpu(leaf, di, &key);
4140         WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
4141         ret = btrfs_delete_one_dir_name(trans, root, path, di);
4142         if (ret) {
4143                 btrfs_abort_transaction(trans, root, ret);
4144                 goto out;
4145         }
4146         btrfs_release_path(path);
4147
4148         ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
4149                                  objectid, root->root_key.objectid,
4150                                  dir_ino, &index, name, name_len);
4151         if (ret < 0) {
4152                 if (ret != -ENOENT) {
4153                         btrfs_abort_transaction(trans, root, ret);
4154                         goto out;
4155                 }
4156                 di = btrfs_search_dir_index_item(root, path, dir_ino,
4157                                                  name, name_len);
4158                 if (IS_ERR_OR_NULL(di)) {
4159                         if (!di)
4160                                 ret = -ENOENT;
4161                         else
4162                                 ret = PTR_ERR(di);
4163                         btrfs_abort_transaction(trans, root, ret);
4164                         goto out;
4165                 }
4166
4167                 leaf = path->nodes[0];
4168                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4169                 btrfs_release_path(path);
4170                 index = key.offset;
4171         }
4172         btrfs_release_path(path);
4173
4174         ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
4175         if (ret) {
4176                 btrfs_abort_transaction(trans, root, ret);
4177                 goto out;
4178         }
4179
4180         btrfs_i_size_write(dir, dir->i_size - name_len * 2);
4181         inode_inc_iversion(dir);
4182         dir->i_mtime = dir->i_ctime = CURRENT_TIME;
4183         ret = btrfs_update_inode_fallback(trans, root, dir);
4184         if (ret)
4185                 btrfs_abort_transaction(trans, root, ret);
4186 out:
4187         btrfs_free_path(path);
4188         return ret;
4189 }
4190
4191 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
4192 {
4193         struct inode *inode = d_inode(dentry);
4194         int err = 0;
4195         struct btrfs_root *root = BTRFS_I(dir)->root;
4196         struct btrfs_trans_handle *trans;
4197
4198         if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
4199                 return -ENOTEMPTY;
4200         if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
4201                 return -EPERM;
4202
4203         trans = __unlink_start_trans(dir);
4204         if (IS_ERR(trans))
4205                 return PTR_ERR(trans);
4206
4207         if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
4208                 err = btrfs_unlink_subvol(trans, root, dir,
4209                                           BTRFS_I(inode)->location.objectid,
4210                                           dentry->d_name.name,
4211                                           dentry->d_name.len);
4212                 goto out;
4213         }
4214
4215         err = btrfs_orphan_add(trans, inode);
4216         if (err)
4217                 goto out;
4218
4219         /* now the directory is empty */
4220         err = btrfs_unlink_inode(trans, root, dir, d_inode(dentry),
4221                                  dentry->d_name.name, dentry->d_name.len);
4222         if (!err)
4223                 btrfs_i_size_write(inode, 0);
4224 out:
4225         btrfs_end_transaction(trans, root);
4226         btrfs_btree_balance_dirty(root);
4227
4228         return err;
4229 }
4230
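/*
 * Make sure the transaction has enough metadata room to keep deleting:
 * convert @bytes_deleted into a number of leaves via
 * btrfs_csum_bytes_to_leaves() and try to add that much to the transaction
 * block reserve without flushing.  A non-zero return tells the truncate
 * loop to stop and let the transaction restart.
 */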
4231 static int truncate_space_check(struct btrfs_trans_handle *trans,
4232                                 struct btrfs_root *root,
4233                                 u64 bytes_deleted)
4234 {
4235         int ret;
4236
4237         bytes_deleted = btrfs_csum_bytes_to_leaves(root, bytes_deleted);
4238         ret = btrfs_block_rsv_add(root, &root->fs_info->trans_block_rsv,
4239                                   bytes_deleted, BTRFS_RESERVE_NO_FLUSH);
4240         if (!ret)
4241                 trans->bytes_reserved += bytes_deleted;
4242         return ret;
4243
4244 }
4245
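/*
 * Truncate an inline extent down to @new_size.  For uncompressed inline
 * extents the item is shrunk in place; for compressed ones we instead zero
 * the tail of the last page via btrfs_truncate_page(), since rewriting the
 * compressed data would be much more complex (see the comment below).
 */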
4246 static int truncate_inline_extent(struct inode *inode,
4247                                   struct btrfs_path *path,
4248                                   struct btrfs_key *found_key,
4249                                   const u64 item_end,
4250                                   const u64 new_size)
4251 {
4252         struct extent_buffer *leaf = path->nodes[0];
4253         int slot = path->slots[0];
4254         struct btrfs_file_extent_item *fi;
4255         u32 size = (u32)(new_size - found_key->offset);
4256         struct btrfs_root *root = BTRFS_I(inode)->root;
4257
4258         fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
4259
4260         if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) {
4261                 loff_t offset = new_size;
4262                 loff_t page_end = ALIGN(offset, PAGE_CACHE_SIZE);
4263
4264                 /*
4265                  * Zero out the remainder of the last page of our inline extent,
4266                  * instead of directly truncating our inline extent here - that
4267                  * would be much more complex (decompressing all the data, then
4268                  * compressing the truncated data, which might be bigger than
4269                  * the size of the inline extent, resizing the extent, etc).
4270                  * We release the path because to get the page we might need to
4271                  * read the extent item from disk (data not in the page cache).
4272                  */
4273                 btrfs_release_path(path);
4274                 return btrfs_truncate_page(inode, offset, page_end - offset, 0);
4275         }
4276
4277         btrfs_set_file_extent_ram_bytes(leaf, fi, size);
4278         size = btrfs_file_extent_calc_inline_size(size);
4279         btrfs_truncate_item(root, path, size, 1);
4280
4281         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4282                 inode_sub_bytes(inode, item_end + 1 - new_size);
4283
4284         return 0;
4285 }
4286
4287 /*
4288  * this can truncate away extent items, csum items and directory items.
4289  * It starts at a high offset and removes keys until it can't find
4290  * any higher than new_size
4291  *
4292  * csum items that cross the new i_size are truncated to the new size
4293  * as well.
4294  *
4295  * min_type is the minimum key type to truncate down to.  If set to 0, this
4296  * will kill all the items on this inode, including the INODE_ITEM_KEY.
4297  */
4298 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
4299                                struct btrfs_root *root,
4300                                struct inode *inode,
4301                                u64 new_size, u32 min_type)
4302 {
4303         struct btrfs_path *path;
4304         struct extent_buffer *leaf;
4305         struct btrfs_file_extent_item *fi;
4306         struct btrfs_key key;
4307         struct btrfs_key found_key;
4308         u64 extent_start = 0;
4309         u64 extent_num_bytes = 0;
4310         u64 extent_offset = 0;
4311         u64 item_end = 0;
4312         u64 last_size = new_size;
4313         u32 found_type = (u8)-1;
4314         int found_extent;
4315         int del_item;
4316         int pending_del_nr = 0;
4317         int pending_del_slot = 0;
4318         int extent_type = -1;
4319         int ret;
4320         int err = 0;
4321         u64 ino = btrfs_ino(inode);
4322         u64 bytes_deleted = 0;
4323         bool be_nice = 0;
4324         bool should_throttle = 0;
4325         bool should_end = 0;
4326
4327         BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
4328
4329         /*
4330          * For non-free-space inodes and reference-counted (COW) roots, we
4331          * want to back off from time to time.
4332          */
4333         if (!btrfs_is_free_space_inode(inode) &&
4334             test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4335                 be_nice = 1;
4336
4337         path = btrfs_alloc_path();
4338         if (!path)
4339                 return -ENOMEM;
4340         path->reada = -1;
4341
4342         /*
4343          * We want to drop from the next block forward in case this new size is
4344          * not block aligned since we will be keeping the last block of the
4345          * extent just the way it is.
4346          */
4347         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4348             root == root->fs_info->tree_root)
4349                 btrfs_drop_extent_cache(inode, ALIGN(new_size,
4350                                         root->sectorsize), (u64)-1, 0);
4351
4352         /*
4353          * This function is also used to drop the items in the log tree before
4354          * we relog the inode, so if root != BTRFS_I(inode)->root, it means
4355          * it is used to drop the logged items. So we shouldn't kill the delayed
4356          * items.
4357          */
4358         if (min_type == 0 && root == BTRFS_I(inode)->root)
4359                 btrfs_kill_delayed_inode_items(inode);
4360
4361         key.objectid = ino;
4362         key.offset = (u64)-1;
4363         key.type = (u8)-1;
4364
4365 search_again:
4366         /*
4367          * with a 16K leaf size and 128MB extents, you can actually queue
4368          * up a huge file in a single leaf.  Most of the time that
4369          * bytes_deleted is > 0, it will be huge by the time we get here
4370          */
4371         if (be_nice && bytes_deleted > 32 * 1024 * 1024) {
4372                 if (btrfs_should_end_transaction(trans, root)) {
4373                         err = -EAGAIN;
4374                         goto error;
4375                 }
4376         }
4377
4378
4379         path->leave_spinning = 1;
4380         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
4381         if (ret < 0) {
4382                 err = ret;
4383                 goto out;
4384         }
4385
4386         if (ret > 0) {
4387                 /* there are no items in the tree for us to truncate, we're
4388                  * done
4389                  */
4390                 if (path->slots[0] == 0)
4391                         goto out;
4392                 path->slots[0]--;
4393         }
4394
4395         while (1) {
4396                 fi = NULL;
4397                 leaf = path->nodes[0];
4398                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4399                 found_type = found_key.type;
4400
4401                 if (found_key.objectid != ino)
4402                         break;
4403
4404                 if (found_type < min_type)
4405                         break;
4406
4407                 item_end = found_key.offset;
4408                 if (found_type == BTRFS_EXTENT_DATA_KEY) {
4409                         fi = btrfs_item_ptr(leaf, path->slots[0],
4410                                             struct btrfs_file_extent_item);
4411                         extent_type = btrfs_file_extent_type(leaf, fi);
4412                         if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4413                                 item_end +=
4414                                     btrfs_file_extent_num_bytes(leaf, fi);
4415                         } else {
4416                                 item_end += btrfs_file_extent_inline_len(leaf,
4417                                                          path->slots[0], fi);
4418                         }
4419                         item_end--;
4420                 }
4421                 if (found_type > min_type) {
4422                         del_item = 1;
4423                 } else {
4424                         if (item_end < new_size)
4425                                 break;
4426                         if (found_key.offset >= new_size)
4427                                 del_item = 1;
4428                         else
4429                                 del_item = 0;
4430                 }
4431                 found_extent = 0;
4432                 /* FIXME, shrink the extent if the ref count is only 1 */
4433                 if (found_type != BTRFS_EXTENT_DATA_KEY)
4434                         goto delete;
4435
4436                 if (del_item)
4437                         last_size = found_key.offset;
4438                 else
4439                         last_size = new_size;
4440
4441                 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4442                         u64 num_dec;
4443                         extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
4444                         if (!del_item) {
4445                                 u64 orig_num_bytes =
4446                                         btrfs_file_extent_num_bytes(leaf, fi);
4447                                 extent_num_bytes = ALIGN(new_size -
4448                                                 found_key.offset,
4449                                                 root->sectorsize);
4450                                 btrfs_set_file_extent_num_bytes(leaf, fi,
4451                                                          extent_num_bytes);
4452                                 num_dec = (orig_num_bytes -
4453                                            extent_num_bytes);
4454                                 if (test_bit(BTRFS_ROOT_REF_COWS,
4455                                              &root->state) &&
4456                                     extent_start != 0)
4457                                         inode_sub_bytes(inode, num_dec);
4458                                 btrfs_mark_buffer_dirty(leaf);
4459                         } else {
4460                                 extent_num_bytes =
4461                                         btrfs_file_extent_disk_num_bytes(leaf,
4462                                                                          fi);
4463                                 extent_offset = found_key.offset -
4464                                         btrfs_file_extent_offset(leaf, fi);
4465
4466                                 /* FIXME blocksize != 4096 */
4467                                 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
4468                                 if (extent_start != 0) {
4469                                         found_extent = 1;
4470                                         if (test_bit(BTRFS_ROOT_REF_COWS,
4471                                                      &root->state))
4472                                                 inode_sub_bytes(inode, num_dec);
4473                                 }
4474                         }
4475                 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
4476                         /*
4477                          * we can't truncate inline items that have had
4478                          * special encodings
4479                          */
4480                         if (!del_item &&
4481                             btrfs_file_extent_encryption(leaf, fi) == 0 &&
4482                             btrfs_file_extent_other_encoding(leaf, fi) == 0) {
4483
4484                                 /*
4485                                  * Need to release path in order to truncate a
4486                                  * compressed extent. So delete the extent
4487                                  * items accumulated so far.
4488                                  */
4489                                 if (btrfs_file_extent_compression(leaf, fi) !=
4490                                     BTRFS_COMPRESS_NONE && pending_del_nr) {
4491                                         err = btrfs_del_items(trans, root, path,
4492                                                               pending_del_slot,
4493                                                               pending_del_nr);
4494                                         if (err) {
4495                                                 btrfs_abort_transaction(trans,
4496                                                                         root,
4497                                                                         err);
4498                                                 goto error;
4499                                         }
4500                                         pending_del_nr = 0;
4501                                 }
4502
4503                                 err = truncate_inline_extent(inode, path,
4504                                                              &found_key,
4505                                                              item_end,
4506                                                              new_size);
4507                                 if (err) {
4508                                         btrfs_abort_transaction(trans,
4509                                                                 root, err);
4510                                         goto error;
4511                                 }
4512                         } else if (test_bit(BTRFS_ROOT_REF_COWS,
4513                                             &root->state)) {
4514                                 inode_sub_bytes(inode, item_end + 1 - new_size);
4515                         }
4516                 }
4517 delete:
4518                 if (del_item) {
4519                         if (!pending_del_nr) {
4520                                 /* no pending yet, add ourselves */
4521                                 pending_del_slot = path->slots[0];
4522                                 pending_del_nr = 1;
4523                         } else if (pending_del_nr &&
4524                                    path->slots[0] + 1 == pending_del_slot) {
4525                                 /* hop on the pending chunk */
4526                                 pending_del_nr++;
4527                                 pending_del_slot = path->slots[0];
4528                         } else {
4529                                 BUG();
4530                         }
4531                 } else {
4532                         break;
4533                 }
4534                 should_throttle = 0;
4535
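                /*
                 * Drop our reference on the data extent backing the item we
                 * just queued for deletion.  When being nice, also note
                 * whether the space reservation ran out (should_end) or
                 * whether delayed ref processing needs throttling
                 * (should_throttle).
                 */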
4536                 if (found_extent &&
4537                     (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4538                      root == root->fs_info->tree_root)) {
4539                         btrfs_set_path_blocking(path);
4540                         bytes_deleted += extent_num_bytes;
4541                         ret = btrfs_free_extent(trans, root, extent_start,
4542                                                 extent_num_bytes, 0,
4543                                                 btrfs_header_owner(leaf),
4544                                                 ino, extent_offset, 0);
4545                         BUG_ON(ret);
4546                         if (btrfs_should_throttle_delayed_refs(trans, root))
4547                                 btrfs_async_run_delayed_refs(root,
4548                                         trans->delayed_ref_updates * 2, 0);
4549                         if (be_nice) {
4550                                 if (truncate_space_check(trans, root,
4551                                                          extent_num_bytes)) {
4552                                         should_end = 1;
4553                                 }
4554                                 if (btrfs_should_throttle_delayed_refs(trans,
4555                                                                        root)) {
4556                                         should_throttle = 1;
4557                                 }
4558                         }
4559                 }
4560
4561                 if (found_type == BTRFS_INODE_ITEM_KEY)
4562                         break;
4563
4564                 if (path->slots[0] == 0 ||
4565                     path->slots[0] != pending_del_slot ||
4566                     should_throttle || should_end) {
4567                         if (pending_del_nr) {
4568                                 ret = btrfs_del_items(trans, root, path,
4569                                                 pending_del_slot,
4570                                                 pending_del_nr);
4571                                 if (ret) {
4572                                         btrfs_abort_transaction(trans,
4573                                                                 root, ret);
4574                                         goto error;
4575                                 }
4576                                 pending_del_nr = 0;
4577                         }
4578                         btrfs_release_path(path);
4579                         if (should_throttle) {
4580                                 unsigned long updates = trans->delayed_ref_updates;
4581                                 if (updates) {
4582                                         trans->delayed_ref_updates = 0;
4583                                         ret = btrfs_run_delayed_refs(trans, root, updates * 2);
4584                                         if (ret && !err)
4585                                                 err = ret;
4586                                 }
4587                         }
4588                         /*
4589                          * if we failed to refill our space rsv, bail out
4590                          * and let the transaction restart
4591                          */
4592                         if (should_end) {
4593                                 err = -EAGAIN;
4594                                 goto error;
4595                         }
4596                         goto search_again;
4597                 } else {
4598                         path->slots[0]--;
4599                 }
4600         }
4601 out:
4602         if (pending_del_nr) {
4603                 ret = btrfs_del_items(trans, root, path, pending_del_slot,
4604                                       pending_del_nr);
4605                 if (ret)
4606                         btrfs_abort_transaction(trans, root, ret);
4607         }
4608 error:
4609         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
4610                 btrfs_ordered_update_i_size(inode, last_size, NULL);
4611
4612         btrfs_free_path(path);
4613
4614         if (be_nice && bytes_deleted > 32 * 1024 * 1024) {
4615                 unsigned long updates = trans->delayed_ref_updates;
4616                 if (updates) {
4617                         trans->delayed_ref_updates = 0;
4618                         ret = btrfs_run_delayed_refs(trans, root, updates * 2);
4619                         if (ret && !err)
4620                                 err = ret;
4621                 }
4622         }
4623         return err;
4624 }
4625
4626 /*
4627  * btrfs_truncate_page - read, zero a chunk and write a page
4628  * @inode - inode that we're zeroing
4629  * @from - the offset to start zeroing
4630  * @len - the length to zero, or 0 to zero everything from the offset to the
4631  *      end of the page
4632  * @front - zero up to the offset instead of from the offset on
4633  *
4634  * This will find the page for the "from" offset, COW it if needed and zero
4635  * the part we want to zero.  This is used with truncate and hole punching.
4636  */
4637 int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
4638                         int front)
4639 {
4640         struct address_space *mapping = inode->i_mapping;
4641         struct btrfs_root *root = BTRFS_I(inode)->root;
4642         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4643         struct btrfs_ordered_extent *ordered;
4644         struct extent_state *cached_state = NULL;
4645         char *kaddr;
4646         u32 blocksize = root->sectorsize;
4647         pgoff_t index = from >> PAGE_CACHE_SHIFT;
4648         unsigned offset = from & (PAGE_CACHE_SIZE-1);
4649         struct page *page;
4650         gfp_t mask = btrfs_alloc_write_mask(mapping);
4651         int ret = 0;
4652         u64 page_start;
4653         u64 page_end;
4654
4655         if ((offset & (blocksize - 1)) == 0 &&
4656             (!len || ((len & (blocksize - 1)) == 0)))
4657                 goto out;
4658         ret = btrfs_delalloc_reserve_space(inode,
4659                         round_down(from, PAGE_CACHE_SIZE), PAGE_CACHE_SIZE);
4660         if (ret)
4661                 goto out;
4662
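        /*
         * We may have to retry from here: if the page was removed from the
         * mapping while we read it in, or if an ordered extent still covers
         * it, drop everything, wait and start over.
         */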
4663 again:
4664         page = find_or_create_page(mapping, index, mask);
4665         if (!page) {
4666                 btrfs_delalloc_release_space(inode,
4667                                 round_down(from, PAGE_CACHE_SIZE),
4668                                 PAGE_CACHE_SIZE);
4669                 ret = -ENOMEM;
4670                 goto out;
4671         }
4672
4673         page_start = page_offset(page);
4674         page_end = page_start + PAGE_CACHE_SIZE - 1;
4675
4676         if (!PageUptodate(page)) {
4677                 ret = btrfs_readpage(NULL, page);
4678                 lock_page(page);
4679                 if (page->mapping != mapping) {
4680                         unlock_page(page);
4681                         page_cache_release(page);
4682                         goto again;
4683                 }
4684                 if (!PageUptodate(page)) {
4685                         ret = -EIO;
4686                         goto out_unlock;
4687                 }
4688         }
4689         wait_on_page_writeback(page);
4690
4691         lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
4692         set_page_extent_mapped(page);
4693
4694         ordered = btrfs_lookup_ordered_extent(inode, page_start);
4695         if (ordered) {
4696                 unlock_extent_cached(io_tree, page_start, page_end,
4697                                      &cached_state, GFP_NOFS);
4698                 unlock_page(page);
4699                 page_cache_release(page);
4700                 btrfs_start_ordered_extent(inode, ordered, 1);
4701                 btrfs_put_ordered_extent(ordered);
4702                 goto again;
4703         }
4704
4705         clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
4706                           EXTENT_DIRTY | EXTENT_DELALLOC |
4707                           EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
4708                           0, 0, &cached_state, GFP_NOFS);
4709
4710         ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
4711                                         &cached_state);
4712         if (ret) {
4713                 unlock_extent_cached(io_tree, page_start, page_end,
4714                                      &cached_state, GFP_NOFS);
4715                 goto out_unlock;
4716         }
4717
4718         if (offset != PAGE_CACHE_SIZE) {
4719                 if (!len)
4720                         len = PAGE_CACHE_SIZE - offset;
4721                 kaddr = kmap(page);
4722                 if (front)
4723                         memset(kaddr, 0, offset);
4724                 else
4725                         memset(kaddr + offset, 0, len);
4726                 flush_dcache_page(page);
4727                 kunmap(page);
4728         }
4729         ClearPageChecked(page);
4730         set_page_dirty(page);
4731         unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
4732                              GFP_NOFS);
4733
4734 out_unlock:
4735         if (ret)
4736                 btrfs_delalloc_release_space(inode, page_start,
4737                                              PAGE_CACHE_SIZE);
4738         unlock_page(page);
4739         page_cache_release(page);
4740 out:
4741         return ret;
4742 }
4743
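/*
 * Make sure a hole covering [offset, offset + len) is recorded on disk.
 * With the NO_HOLES incompat feature there is nothing to insert, so just
 * mark the inode as changed in this transaction so that an fsync logs the
 * hole; otherwise drop whatever is in the range and insert an explicit
 * hole file extent item.
 */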
4744 static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
4745                              u64 offset, u64 len)
4746 {
4747         struct btrfs_trans_handle *trans;
4748         int ret;
4749
4750         /*
4751          * Still need to make sure the inode looks like it's been updated so
4752          * that any holes get logged if we fsync.
4753          */
4754         if (btrfs_fs_incompat(root->fs_info, NO_HOLES)) {
4755                 BTRFS_I(inode)->last_trans = root->fs_info->generation;
4756                 BTRFS_I(inode)->last_sub_trans = root->log_transid;
4757                 BTRFS_I(inode)->last_log_commit = root->last_log_commit;
4758                 return 0;
4759         }
4760
4761         /*
4762          * 1 - for the one we're dropping
4763          * 1 - for the one we're adding
4764          * 1 - for updating the inode.
4765          */
4766         trans = btrfs_start_transaction(root, 3);
4767         if (IS_ERR(trans))
4768                 return PTR_ERR(trans);
4769
4770         ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1);
4771         if (ret) {
4772                 btrfs_abort_transaction(trans, root, ret);
4773                 btrfs_end_transaction(trans, root);
4774                 return ret;
4775         }
4776
4777         ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
4778                                        0, 0, len, 0, len, 0, 0, 0);
4779         if (ret)
4780                 btrfs_abort_transaction(trans, root, ret);
4781         else
4782                 btrfs_update_inode(trans, root, inode);
4783         btrfs_end_transaction(trans, root);
4784         return ret;
4785 }
4786
4787 /*
4788  * This function puts in dummy file extents for the area we're creating a hole
4789  * for.  So if we are truncating this file to a larger size we need to insert
4790  * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE
4791  * for the range between oldsize and size.
4792  */
4793 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
4794 {
4795         struct btrfs_root *root = BTRFS_I(inode)->root;
4796         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4797         struct extent_map *em = NULL;
4798         struct extent_state *cached_state = NULL;
4799         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4800         u64 hole_start = ALIGN(oldsize, root->sectorsize);
4801         u64 block_end = ALIGN(size, root->sectorsize);
4802         u64 last_byte;
4803         u64 cur_offset;
4804         u64 hole_size;
4805         int err = 0;
4806
4807         /*
4808          * If our size started in the middle of a page we need to zero out the
4809          * rest of the page before we expand the i_size, otherwise we could
4810          * expose stale data.
4811          */
4812         err = btrfs_truncate_page(inode, oldsize, 0, 0);
4813         if (err)
4814                 return err;
4815
4816         if (size <= hole_start)
4817                 return 0;
4818
4819         while (1) {
4820                 struct btrfs_ordered_extent *ordered;
4821
4822                 lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
4823                                  &cached_state);
4824                 ordered = btrfs_lookup_ordered_range(inode, hole_start,
4825                                                      block_end - hole_start);
4826                 if (!ordered)
4827                         break;
4828                 unlock_extent_cached(io_tree, hole_start, block_end - 1,
4829                                      &cached_state, GFP_NOFS);
4830                 btrfs_start_ordered_extent(inode, ordered, 1);
4831                 btrfs_put_ordered_extent(ordered);
4832         }
4833
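        /*
         * Walk the range between the old and the new size extent by extent.
         * For every part that is not a preallocated extent, insert a hole
         * file extent (via maybe_insert_hole()) and cache a matching
         * EXTENT_MAP_HOLE entry in the extent map tree.
         */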
4834         cur_offset = hole_start;
4835         while (1) {
4836                 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
4837                                 block_end - cur_offset, 0);
4838                 if (IS_ERR(em)) {
4839                         err = PTR_ERR(em);
4840                         em = NULL;
4841                         break;
4842                 }
4843                 last_byte = min(extent_map_end(em), block_end);
4844                 last_byte = ALIGN(last_byte, root->sectorsize);
4845                 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
4846                         struct extent_map *hole_em;
4847                         hole_size = last_byte - cur_offset;
4848
4849                         err = maybe_insert_hole(root, inode, cur_offset,
4850                                                 hole_size);
4851                         if (err)
4852                                 break;
4853                         btrfs_drop_extent_cache(inode, cur_offset,
4854                                                 cur_offset + hole_size - 1, 0);
4855                         hole_em = alloc_extent_map();
4856                         if (!hole_em) {
4857                                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4858                                         &BTRFS_I(inode)->runtime_flags);
4859                                 goto next;
4860                         }
4861                         hole_em->start = cur_offset;
4862                         hole_em->len = hole_size;
4863                         hole_em->orig_start = cur_offset;
4864
4865                         hole_em->block_start = EXTENT_MAP_HOLE;
4866                         hole_em->block_len = 0;
4867                         hole_em->orig_block_len = 0;
4868                         hole_em->ram_bytes = hole_size;
4869                         hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
4870                         hole_em->compress_type = BTRFS_COMPRESS_NONE;
4871                         hole_em->generation = root->fs_info->generation;
4872
4873                         while (1) {
4874                                 write_lock(&em_tree->lock);
4875                                 err = add_extent_mapping(em_tree, hole_em, 1);
4876                                 write_unlock(&em_tree->lock);
4877                                 if (err != -EEXIST)
4878                                         break;
4879                                 btrfs_drop_extent_cache(inode, cur_offset,
4880                                                         cur_offset +
4881                                                         hole_size - 1, 0);
4882                         }
4883                         free_extent_map(hole_em);
4884                 }
4885 next:
4886                 free_extent_map(em);
4887                 em = NULL;
4888                 cur_offset = last_byte;
4889                 if (cur_offset >= block_end)
4890                         break;
4891         }
4892         free_extent_map(em);
4893         unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
4894                              GFP_NOFS);
4895         return err;
4896 }
4897
4898 static int wait_snapshoting_atomic_t(atomic_t *a)
4899 {
4900         schedule();
4901         return 0;
4902 }
4903
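/*
 * Keep retrying btrfs_start_write_no_snapshoting() until it succeeds,
 * sleeping on ->will_be_snapshoted in between attempts, so the caller does
 * not race with an ongoing snapshot creation of this root.
 */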
4904 static void wait_for_snapshot_creation(struct btrfs_root *root)
4905 {
4906         while (true) {
4907                 int ret;
4908
4909                 ret = btrfs_start_write_no_snapshoting(root);
4910                 if (ret)
4911                         break;
4912                 wait_on_atomic_t(&root->will_be_snapshoted,
4913                                  wait_snapshoting_atomic_t,
4914                                  TASK_UNINTERRUPTIBLE);
4915         }
4916 }
4917
4918 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
4919 {
4920         struct btrfs_root *root = BTRFS_I(inode)->root;
4921         struct btrfs_trans_handle *trans;
4922         loff_t oldsize = i_size_read(inode);
4923         loff_t newsize = attr->ia_size;
4924         int mask = attr->ia_valid;
4925         int ret;
4926
4927         /*
4928          * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
4929          * special case where we need to update the times despite not having
4930          * these flags set.  For all other operations the VFS set these flags
4931          * explicitly if it wants a timestamp update.
4932          */
4933         if (newsize != oldsize) {
4934                 inode_inc_iversion(inode);
4935                 if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
4936                         inode->i_ctime = inode->i_mtime =
4937                                 current_fs_time(inode->i_sb);
4938         }
4939
4940         if (newsize > oldsize) {
4941                 truncate_pagecache(inode, newsize);
4942                 /*
4943                  * Don't do an expanding truncate while snapshotting is ongoing.
4944                  * This is to ensure the snapshot captures a fully consistent
4945                  * state of this file - if the snapshot captures this expanding
4946                  * truncation, it must capture all writes that happened before
4947                  * this truncation.
4948                  */
4949                 wait_for_snapshot_creation(root);
4950                 ret = btrfs_cont_expand(inode, oldsize, newsize);
4951                 if (ret) {
4952                         btrfs_end_write_no_snapshoting(root);
4953                         return ret;
4954                 }
4955
4956                 trans = btrfs_start_transaction(root, 1);
4957                 if (IS_ERR(trans)) {
4958                         btrfs_end_write_no_snapshoting(root);
4959                         return PTR_ERR(trans);
4960                 }
4961
4962                 i_size_write(inode, newsize);
4963                 btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
4964                 ret = btrfs_update_inode(trans, root, inode);
4965                 btrfs_end_write_no_snapshoting(root);
4966                 btrfs_end_transaction(trans, root);
4967         } else {
4968
4969                 /*
4970                  * We're truncating a file that used to have good data down to
4971                  * zero. Make sure it gets into the ordered flush list so that
4972                  * any new writes get down to disk quickly.
4973                  */
4974                 if (newsize == 0)
4975                         set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
4976                                 &BTRFS_I(inode)->runtime_flags);
4977
4978                 /*
4979                  * 1 for the orphan item we're going to add
4980                  * 1 for the orphan item deletion.
4981                  */
4982                 trans = btrfs_start_transaction(root, 2);
4983                 if (IS_ERR(trans))
4984                         return PTR_ERR(trans);
4985
4986                 /*
4987                  * We need to do this in case we fail at _any_ point during the
4988                  * actual truncate.  Once we do the truncate_setsize we could
4989                  * invalidate pages, which forces any outstanding ordered io to
4990                  * be instantly completed, which will give us extents that need
4991                  * to be truncated.  If we fail to get the orphan item added we
4992                  * could have leftover extents that were never meant to live,
4993                  * so we need to guarantee from this point on that everything
4994                  * will be consistent.
4995                  */
4996                 ret = btrfs_orphan_add(trans, inode);
4997                 btrfs_end_transaction(trans, root);
4998                 if (ret)
4999                         return ret;
5000
5001                 /* we don't support swapfiles, so vmtruncate shouldn't fail */
5002                 truncate_setsize(inode, newsize);
5003
5004                 /* Disable non-locked read DIO to avoid an endless truncate */
5005                 btrfs_inode_block_unlocked_dio(inode);
5006                 inode_dio_wait(inode);
5007                 btrfs_inode_resume_unlocked_dio(inode);
5008
5009                 ret = btrfs_truncate(inode);
5010                 if (ret && inode->i_nlink) {
5011                         int err;
5012
5013                         /*
5014                          * Failed to truncate.  disk_i_size is only adjusted down
5015                          * as we remove extents, so it should represent the true
5016                          * size of the inode.  Reset the in-memory size and
5017                          * delete our orphan entry.
5018                          */
5019                         trans = btrfs_join_transaction(root);
5020                         if (IS_ERR(trans)) {
5021                                 btrfs_orphan_del(NULL, inode);
5022                                 return ret;
5023                         }
5024                         i_size_write(inode, BTRFS_I(inode)->disk_i_size);
5025                         err = btrfs_orphan_del(trans, inode);
5026                         if (err)
5027                                 btrfs_abort_transaction(trans, root, err);
5028                         btrfs_end_transaction(trans, root);
5029                 }
5030         }
5031
5032         return ret;
5033 }
5034
5035 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
5036 {
5037         struct inode *inode = d_inode(dentry);
5038         struct btrfs_root *root = BTRFS_I(inode)->root;
5039         int err;
5040
5041         if (btrfs_root_readonly(root))
5042                 return -EROFS;
5043
5044         err = inode_change_ok(inode, attr);
5045         if (err)
5046                 return err;
5047
5048         if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
5049                 err = btrfs_setsize(inode, attr);
5050                 if (err)
5051                         return err;
5052         }
5053
5054         if (attr->ia_valid) {
5055                 setattr_copy(inode, attr);
5056                 inode_inc_iversion(inode);
5057                 err = btrfs_dirty_inode(inode);
5058
5059                 if (!err && attr->ia_valid & ATTR_MODE)
5060                         err = posix_acl_chmod(inode, inode->i_mode);
5061         }
5062
5063         return err;
5064 }
5065
5066 /*
5067  * While truncating the inode pages during eviction, we get the VFS calling
5068  * btrfs_invalidatepage() against each page of the inode. This is slow because
5069  * the calls to btrfs_invalidatepage() result in a huge number of calls to
5070  * lock_extent_bits() and clear_extent_bit(), which keep merging and splitting
5071  * extent_state structures over and over, wasting lots of time.
5072  *
5073  * Therefore if the inode is being evicted, let btrfs_invalidatepage() skip all
5074  * those expensive operations on a per page basis and do only the ordered io
5075  * finishing, while we release here the extent_map and extent_state structures,
5076  * without the excessive merging and splitting.
5077  */
5078 static void evict_inode_truncate_pages(struct inode *inode)
5079 {
5080         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5081         struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree;
5082         struct rb_node *node;
5083
5084         ASSERT(inode->i_state & I_FREEING);
5085         truncate_inode_pages_final(&inode->i_data);
5086
5087         write_lock(&map_tree->lock);
5088         while (!RB_EMPTY_ROOT(&map_tree->map)) {
5089                 struct extent_map *em;
5090
5091                 node = rb_first(&map_tree->map);
5092                 em = rb_entry(node, struct extent_map, rb_node);
5093                 clear_bit(EXTENT_FLAG_PINNED, &em->flags);
5094                 clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
5095                 remove_extent_mapping(map_tree, em);
5096                 free_extent_map(em);
5097                 if (need_resched()) {
5098                         write_unlock(&map_tree->lock);
5099                         cond_resched();
5100                         write_lock(&map_tree->lock);
5101                 }
5102         }
5103         write_unlock(&map_tree->lock);
5104
5105         /*
5106          * Keep looping until we have no more ranges in the io tree.
5107          * We can have ongoing bios started by readpages (called from readahead)
5108          * that have their endio callback (extent_io.c:end_bio_extent_readpage)
5109          * still in progress (they unlocked the pages in the bio but have
5110          * not yet unlocked the ranges in the io tree). This means some
5111          * ranges can still be locked while eviction has started, because
5112          * before submitting those bios, which are executed by a separate
5113          * task (a work queue kthread), no inode references (inode->i_count)
5114          * were taken (they would be dropped in the end io callback of each
5115          * bio).  Therefore here we effectively end up waiting for those bios
5116          * and for anyone else holding locked ranges without having bumped the
5117          * inode's reference count - if we don't do it, when they access the
5118          * inode's io_tree to unlock a range it may be too late, leading to a
5119          * use-after-free issue.
5120          */
5121         spin_lock(&io_tree->lock);
5122         while (!RB_EMPTY_ROOT(&io_tree->state)) {
5123                 struct extent_state *state;
5124                 struct extent_state *cached_state = NULL;
5125                 u64 start;
5126                 u64 end;
5127
5128                 node = rb_first(&io_tree->state);
5129                 state = rb_entry(node, struct extent_state, rb_node);
5130                 start = state->start;
5131                 end = state->end;
5132                 spin_unlock(&io_tree->lock);
5133
5134                 lock_extent_bits(io_tree, start, end, 0, &cached_state);
5135
5136                 /*
5137                  * If the range still has the DELALLOC flag, the extent didn't
5138                  * reach disk and its reserved space won't be freed by the
5139                  * delayed ref.  So we need to free its reserved space here.
5140                  * (Refer to the comment in btrfs_invalidatepage, case 2)
5141                  *
5142                  * Note, end is the offset of the last byte, so we need + 1 here.
5143                  */
5144                 if (state->state & EXTENT_DELALLOC)
5145                         btrfs_qgroup_free_data(inode, start, end - start + 1);
5146
5147                 clear_extent_bit(io_tree, start, end,
5148                                  EXTENT_LOCKED | EXTENT_DIRTY |
5149                                  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
5150                                  EXTENT_DEFRAG, 1, 1,
5151                                  &cached_state, GFP_NOFS);
5152
5153                 cond_resched();
5154                 spin_lock(&io_tree->lock);
5155         }
5156         spin_unlock(&io_tree->lock);
5157 }
5158
5159 void btrfs_evict_inode(struct inode *inode)
5160 {
5161         struct btrfs_trans_handle *trans;
5162         struct btrfs_root *root = BTRFS_I(inode)->root;
5163         struct btrfs_block_rsv *rsv, *global_rsv;
5164         int steal_from_global = 0;
5165         u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
5166         int ret;
5167
5168         trace_btrfs_inode_evict(inode);
5169
5170         evict_inode_truncate_pages(inode);
5171
5172         if (inode->i_nlink &&
5173             ((btrfs_root_refs(&root->root_item) != 0 &&
5174               root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
5175              btrfs_is_free_space_inode(inode)))
5176                 goto no_delete;
5177
5178         if (is_bad_inode(inode)) {
5179                 btrfs_orphan_del(NULL, inode);
5180                 goto no_delete;
5181         }
5182         /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
5183         if (!special_file(inode->i_mode))
5184                 btrfs_wait_ordered_range(inode, 0, (u64)-1);
5185
5186         btrfs_free_io_failure_record(inode, 0, (u64)-1);
5187
5188         if (root->fs_info->log_root_recovering) {
5189                 BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
5190                                  &BTRFS_I(inode)->runtime_flags));
5191                 goto no_delete;
5192         }
5193
5194         if (inode->i_nlink > 0) {
5195                 BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
5196                        root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
5197                 goto no_delete;
5198         }
5199
5200         ret = btrfs_commit_inode_delayed_inode(inode);
5201         if (ret) {
5202                 btrfs_orphan_del(NULL, inode);
5203                 goto no_delete;
5204         }
5205
5206         rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
5207         if (!rsv) {
5208                 btrfs_orphan_del(NULL, inode);
5209                 goto no_delete;
5210         }
5211         rsv->size = min_size;
5212         rsv->failfast = 1;
5213         global_rsv = &root->fs_info->global_block_rsv;
5214
5215         btrfs_i_size_write(inode, 0);
5216
5217         /*
5218          * This is a bit simpler than btrfs_truncate since we've already
5219          * reserved our space for our orphan item in the unlink, so we just
5220          * need to reserve some slack space in case we add bytes to and
5221          * update the inode item when doing the truncate.
5222          */
5223         while (1) {
5224                 ret = btrfs_block_rsv_refill(root, rsv, min_size,
5225                                              BTRFS_RESERVE_FLUSH_LIMIT);
5226
5227                 /*
5228                  * Try to steal from the global reserve: we will likely
5229                  * not use this space anyway, and we want to try as hard
5230                  * as possible to get this to work.
5231                  */
5232                 if (ret)
5233                         steal_from_global++;
5234                 else
5235                         steal_from_global = 0;
5236                 ret = 0;
5237
5238                 /*
5239                  * steal_from_global == 0: we reserved stuff, hooray!
5240                  * steal_from_global == 1: we didn't reserve stuff, boo!
5241                  * steal_from_global == 2: we've committed, still not a lot of
5242                  * room but maybe we'll have room in the global reserve this
5243                  * time.
5244                  * steal_from_global == 3: abandon all hope!
5245                  */
5246                 if (steal_from_global > 2) {
5247                         btrfs_warn(root->fs_info,
5248                                 "Could not get space for a delete, will truncate on mount %d",
5249                                 ret);
5250                         btrfs_orphan_del(NULL, inode);
5251                         btrfs_free_block_rsv(root, rsv);
5252                         goto no_delete;
5253                 }
5254
5255                 trans = btrfs_join_transaction(root);
5256                 if (IS_ERR(trans)) {
5257                         btrfs_orphan_del(NULL, inode);
5258                         btrfs_free_block_rsv(root, rsv);
5259                         goto no_delete;
5260                 }
5261
5262                 /*
5263                  * We can't just steal from the global reserve, we need to make
5264                  * sure there is room to do it; if not, we need to commit and
5265                  * try again.
5266                  */
5267                 if (steal_from_global) {
5268                         if (!btrfs_check_space_for_delayed_refs(trans, root))
5269                                 ret = btrfs_block_rsv_migrate(global_rsv, rsv,
5270                                                               min_size);
5271                         else
5272                                 ret = -ENOSPC;
5273                 }
5274
5275                 /*
5276                  * Couldn't steal from the global reserve, we have too much
5277                  * pending stuff built up, commit the transaction and try it
5278                  * again.
5279                  */
5280                 if (ret) {
5281                         ret = btrfs_commit_transaction(trans, root);
5282                         if (ret) {
5283                                 btrfs_orphan_del(NULL, inode);
5284                                 btrfs_free_block_rsv(root, rsv);
5285                                 goto no_delete;
5286                         }
5287                         continue;
5288                 } else {
5289                         steal_from_global = 0;
5290                 }
5291
5292                 trans->block_rsv = rsv;
5293
5294                 ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
5295                 if (ret != -ENOSPC && ret != -EAGAIN)
5296                         break;
5297
5298                 trans->block_rsv = &root->fs_info->trans_block_rsv;
5299                 btrfs_end_transaction(trans, root);
5300                 trans = NULL;
5301                 btrfs_btree_balance_dirty(root);
5302         }
5303
5304         btrfs_free_block_rsv(root, rsv);
5305
5306         /*
5307          * Errors here aren't a big deal; they just mean we leave orphan
5308          * items in the tree.  They will be cleaned up on the next mount.
5309          */
5310         if (ret == 0) {
5311                 trans->block_rsv = root->orphan_block_rsv;
5312                 btrfs_orphan_del(trans, inode);
5313         } else {
5314                 btrfs_orphan_del(NULL, inode);
5315         }
5316
5317         trans->block_rsv = &root->fs_info->trans_block_rsv;
5318         if (!(root == root->fs_info->tree_root ||
5319               root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
5320                 btrfs_return_ino(root, btrfs_ino(inode));
5321
5322         btrfs_end_transaction(trans, root);
5323         btrfs_btree_balance_dirty(root);
5324 no_delete:
5325         btrfs_remove_delayed_node(inode);
5326         clear_inode(inode);
5327         return;
5328 }
5329
5330 /*
5331  * this returns the key found in the dir entry in the location pointer.
5332  * If no dir entries were found, location->objectid is 0.
5333  */
5334 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
5335                                struct btrfs_key *location)
5336 {
5337         const char *name = dentry->d_name.name;
5338         int namelen = dentry->d_name.len;
5339         struct btrfs_dir_item *di;
5340         struct btrfs_path *path;
5341         struct btrfs_root *root = BTRFS_I(dir)->root;
5342         int ret = 0;
5343
5344         path = btrfs_alloc_path();
5345         if (!path)
5346                 return -ENOMEM;
5347
5348         di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
5349                                     namelen, 0);
5350         if (IS_ERR(di))
5351                 ret = PTR_ERR(di);
5352
5353         if (IS_ERR_OR_NULL(di))
5354                 goto out_err;
5355
5356         btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
5357 out:
5358         btrfs_free_path(path);
5359         return ret;
5360 out_err:
5361         location->objectid = 0;
5362         goto out;
5363 }
5364
5365 /*
5366  * when we hit a tree root in a directory, the btrfs part of the inode
5367  * needs to be changed to reflect the root directory of the tree root.  This
5368  * is kind of like crossing a mount point.
5369  */
5370 static int fixup_tree_root_location(struct btrfs_root *root,
5371                                     struct inode *dir,
5372                                     struct dentry *dentry,
5373                                     struct btrfs_key *location,
5374                                     struct btrfs_root **sub_root)
5375 {
5376         struct btrfs_path *path;
5377         struct btrfs_root *new_root;
5378         struct btrfs_root_ref *ref;
5379         struct extent_buffer *leaf;
5380         struct btrfs_key key;
5381         int ret;
5382         int err = 0;
5383
5384         path = btrfs_alloc_path();
5385         if (!path) {
5386                 err = -ENOMEM;
5387                 goto out;
5388         }
5389
5390         err = -ENOENT;
5391         key.objectid = BTRFS_I(dir)->root->root_key.objectid;
5392         key.type = BTRFS_ROOT_REF_KEY;
5393         key.offset = location->objectid;
5394
5395         ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, path,
5396                                 0, 0);
5397         if (ret) {
5398                 if (ret < 0)
5399                         err = ret;
5400                 goto out;
5401         }
5402
5403         leaf = path->nodes[0];
5404         ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
5405         if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
5406             btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
5407                 goto out;
5408
5409         ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
5410                                    (unsigned long)(ref + 1),
5411                                    dentry->d_name.len);
5412         if (ret)
5413                 goto out;
5414
5415         btrfs_release_path(path);
5416
5417         new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
5418         if (IS_ERR(new_root)) {
5419                 err = PTR_ERR(new_root);
5420                 goto out;
5421         }
5422
5423         *sub_root = new_root;
5424         location->objectid = btrfs_root_dirid(&new_root->root_item);
5425         location->type = BTRFS_INODE_ITEM_KEY;
5426         location->offset = 0;
5427         err = 0;
5428 out:
5429         btrfs_free_path(path);
5430         return err;
5431 }
5432
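/*
 * Insert the inode into the root's red-black tree of in-memory inodes, keyed
 * by inode number.  If an entry with the same ino already exists it must be
 * an inode that is being freed, so replace it in place.
 */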
5433 static void inode_tree_add(struct inode *inode)
5434 {
5435         struct btrfs_root *root = BTRFS_I(inode)->root;
5436         struct btrfs_inode *entry;
5437         struct rb_node **p;
5438         struct rb_node *parent;
5439         struct rb_node *new = &BTRFS_I(inode)->rb_node;
5440         u64 ino = btrfs_ino(inode);
5441
5442         if (inode_unhashed(inode))
5443                 return;
5444         parent = NULL;
5445         spin_lock(&root->inode_lock);
5446         p = &root->inode_tree.rb_node;
5447         while (*p) {
5448                 parent = *p;
5449                 entry = rb_entry(parent, struct btrfs_inode, rb_node);
5450
5451                 if (ino < btrfs_ino(&entry->vfs_inode))
5452                         p = &parent->rb_left;
5453                 else if (ino > btrfs_ino(&entry->vfs_inode))
5454                         p = &parent->rb_right;
5455                 else {
5456                         WARN_ON(!(entry->vfs_inode.i_state &
5457                                   (I_WILL_FREE | I_FREEING)));
5458                         rb_replace_node(parent, new, &root->inode_tree);
5459                         RB_CLEAR_NODE(parent);
5460                         spin_unlock(&root->inode_lock);
5461                         return;
5462                 }
5463         }
5464         rb_link_node(new, parent, p);
5465         rb_insert_color(new, &root->inode_tree);
5466         spin_unlock(&root->inode_lock);
5467 }
5468
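/*
 * Remove the inode from the root's in-memory inode tree.  If the tree becomes
 * empty and the root has no references left, hand the root over to
 * btrfs_add_dead_root() for cleanup.
 */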
5469 static void inode_tree_del(struct inode *inode)
5470 {
5471         struct btrfs_root *root = BTRFS_I(inode)->root;
5472         int empty = 0;
5473
5474         spin_lock(&root->inode_lock);
5475         if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
5476                 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
5477                 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
5478                 empty = RB_EMPTY_ROOT(&root->inode_tree);
5479         }
5480         spin_unlock(&root->inode_lock);
5481
5482         if (empty && btrfs_root_refs(&root->root_item) == 0) {
5483                 synchronize_srcu(&root->fs_info->subvol_srcu);
5484                 spin_lock(&root->inode_lock);
5485                 empty = RB_EMPTY_ROOT(&root->inode_tree);
5486                 spin_unlock(&root->inode_lock);
5487                 if (empty)
5488                         btrfs_add_dead_root(root);
5489         }
5490 }
5491
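/*
 * Walk the root's in-memory inode tree and drop every inode we can still grab
 * a reference on, pruning dentry aliases so the final iput() can free them.
 * Except in the error case, the root is expected to have no references left.
 */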
5492 void btrfs_invalidate_inodes(struct btrfs_root *root)
5493 {
5494         struct rb_node *node;
5495         struct rb_node *prev;
5496         struct btrfs_inode *entry;
5497         struct inode *inode;
5498         u64 objectid = 0;
5499
5500         if (!test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
5501                 WARN_ON(btrfs_root_refs(&root->root_item) != 0);
5502
5503         spin_lock(&root->inode_lock);
5504 again:
5505         node = root->inode_tree.rb_node;
5506         prev = NULL;
5507         while (node) {
5508                 prev = node;
5509                 entry = rb_entry(node, struct btrfs_inode, rb_node);
5510
5511                 if (objectid < btrfs_ino(&entry->vfs_inode))
5512                         node = node->rb_left;
5513                 else if (objectid > btrfs_ino(&entry->vfs_inode))
5514                         node = node->rb_right;
5515                 else
5516                         break;
5517         }
5518         if (!node) {
5519                 while (prev) {
5520                         entry = rb_entry(prev, struct btrfs_inode, rb_node);
5521                         if (objectid <= btrfs_ino(&entry->vfs_inode)) {
5522                                 node = prev;
5523                                 break;
5524                         }
5525                         prev = rb_next(prev);
5526                 }
5527         }
5528         while (node) {
5529                 entry = rb_entry(node, struct btrfs_inode, rb_node);
5530                 objectid = btrfs_ino(&entry->vfs_inode) + 1;
5531                 inode = igrab(&entry->vfs_inode);
5532                 if (inode) {
5533                         spin_unlock(&root->inode_lock);
5534                         if (atomic_read(&inode->i_count) > 1)
5535                                 d_prune_aliases(inode);
5536                         /*
5537                          * btrfs_drop_inode will have it removed from
5538                          * the inode cache when its usage count
5539                          * hits zero.
5540                          */
5541                         iput(inode);
5542                         cond_resched();
5543                         spin_lock(&root->inode_lock);
5544                         goto again;
5545                 }
5546
5547                 if (cond_resched_lock(&root->inode_lock))
5548                         goto again;
5549
5550                 node = rb_next(node);
5551         }
5552         spin_unlock(&root->inode_lock);
5553 }
5554
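/* iget5_locked() init callback: stash the location key and root in the inode */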
5555 static int btrfs_init_locked_inode(struct inode *inode, void *p)
5556 {
5557         struct btrfs_iget_args *args = p;
5558         inode->i_ino = args->location->objectid;
5559         memcpy(&BTRFS_I(inode)->location, args->location,
5560                sizeof(*args->location));
5561         BTRFS_I(inode)->root = args->root;
5562         return 0;
5563 }
5564
5565 static int btrfs_find_actor(struct inode *inode, void *opaque)
5566 {
5567         struct btrfs_iget_args *args = opaque;
5568         return args->location->objectid == BTRFS_I(inode)->location.objectid &&
5569                 args->root == BTRFS_I(inode)->root;
5570 }
5571
5572 static struct inode *btrfs_iget_locked(struct super_block *s,
5573                                        struct btrfs_key *location,
5574                                        struct btrfs_root *root)
5575 {
5576         struct inode *inode;
5577         struct btrfs_iget_args args;
5578         unsigned long hashval = btrfs_inode_hash(location->objectid, root);
5579
5580         args.location = location;
5581         args.root = root;
5582
5583         inode = iget5_locked(s, hashval, btrfs_find_actor,
5584                              btrfs_init_locked_inode,
5585                              (void *)&args);
5586         return inode;
5587 }
5588
5589 /* Get an inode object given its location and corresponding root.
5590  * Sets *new to 1 if the inode was read from disk.
5591  */
5592 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
5593                          struct btrfs_root *root, int *new)
5594 {
5595         struct inode *inode;
5596
5597         inode = btrfs_iget_locked(s, location, root);
5598         if (!inode)
5599                 return ERR_PTR(-ENOMEM);
5600
5601         if (inode->i_state & I_NEW) {
5602                 btrfs_read_locked_inode(inode);
5603                 if (!is_bad_inode(inode)) {
5604                         inode_tree_add(inode);
5605                         unlock_new_inode(inode);
5606                         if (new)
5607                                 *new = 1;
5608                 } else {
5609                         unlock_new_inode(inode);
5610                         iput(inode);
5611                         inode = ERR_PTR(-ESTALE);
5612                 }
5613         }
5614
5615         return inode;
5616 }
5617
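/*
 * Build an in-memory only placeholder directory inode.  Used by
 * btrfs_lookup_dentry() when a subvolume reference cannot be resolved, so the
 * entry still shows up as an empty directory.
 */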
5618 static struct inode *new_simple_dir(struct super_block *s,
5619                                     struct btrfs_key *key,
5620                                     struct btrfs_root *root)
5621 {
5622         struct inode *inode = new_inode(s);
5623
5624         if (!inode)
5625                 return ERR_PTR(-ENOMEM);
5626
5627         BTRFS_I(inode)->root = root;
5628         memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
5629         set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
5630
5631         inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
5632         inode->i_op = &btrfs_dir_ro_inode_operations;
5633         inode->i_fop = &simple_dir_operations;
5634         inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
5635         inode->i_mtime = CURRENT_TIME;
5636         inode->i_atime = inode->i_mtime;
5637         inode->i_ctime = inode->i_mtime;
5638         BTRFS_I(inode)->i_otime = inode->i_mtime;
5639
5640         return inode;
5641 }
5642
5643 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
5644 {
5645         struct inode *inode;
5646         struct btrfs_root *root = BTRFS_I(dir)->root;
5647         struct btrfs_root *sub_root = root;
5648         struct btrfs_key location;
5649         int index;
5650         int ret = 0;
5651
5652         if (dentry->d_name.len > BTRFS_NAME_LEN)
5653                 return ERR_PTR(-ENAMETOOLONG);
5654
5655         ret = btrfs_inode_by_name(dir, dentry, &location);
5656         if (ret < 0)
5657                 return ERR_PTR(ret);
5658
5659         if (location.objectid == 0)
5660                 return ERR_PTR(-ENOENT);
5661
5662         if (location.type == BTRFS_INODE_ITEM_KEY) {
5663                 inode = btrfs_iget(dir->i_sb, &location, root, NULL);
5664                 return inode;
5665         }
5666
5667         BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
5668
5669         index = srcu_read_lock(&root->fs_info->subvol_srcu);
5670         ret = fixup_tree_root_location(root, dir, dentry,
5671                                        &location, &sub_root);
5672         if (ret < 0) {
5673                 if (ret != -ENOENT)
5674                         inode = ERR_PTR(ret);
5675                 else
5676                         inode = new_simple_dir(dir->i_sb, &location, sub_root);
5677         } else {
5678                 inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
5679         }
5680         srcu_read_unlock(&root->fs_info->subvol_srcu, index);
5681
5682         if (!IS_ERR(inode) && root != sub_root) {
5683                 down_read(&root->fs_info->cleanup_work_sem);
5684                 if (!(inode->i_sb->s_flags & MS_RDONLY))
5685                         ret = btrfs_orphan_cleanup(sub_root);
5686                 up_read(&root->fs_info->cleanup_work_sem);
5687                 if (ret) {
5688                         iput(inode);
5689                         inode = ERR_PTR(ret);
5690                 }
5691         }
5692
5693         return inode;
5694 }
5695
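/*
 * ->d_delete: return 1 (drop the dentry instead of caching it) when it belongs
 * to a deleted subvolume or to the dummy empty-subvolume directory inode.
 */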
5696 static int btrfs_dentry_delete(const struct dentry *dentry)
5697 {
5698         struct btrfs_root *root;
5699         struct inode *inode = d_inode(dentry);
5700
5701         if (!inode && !IS_ROOT(dentry))
5702                 inode = d_inode(dentry->d_parent);
5703
5704         if (inode) {
5705                 root = BTRFS_I(inode)->root;
5706                 if (btrfs_root_refs(&root->root_item) == 0)
5707                         return 1;
5708
5709                 if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5710                         return 1;
5711         }
5712         return 0;
5713 }
5714
5715 static void btrfs_dentry_release(struct dentry *dentry)
5716 {
5717         kfree(dentry->d_fsdata);
5718 }
5719
5720 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
5721                                    unsigned int flags)
5722 {
5723         struct inode *inode;
5724
5725         inode = btrfs_lookup_dentry(dir, dentry);
5726         if (IS_ERR(inode)) {
5727                 if (PTR_ERR(inode) == -ENOENT)
5728                         inode = NULL;
5729                 else
5730                         return ERR_CAST(inode);
5731         }
5732
5733         return d_splice_alias(inode, dentry);
5734 }
5735
5736 unsigned char btrfs_filetype_table[] = {
5737         DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
5738 };
5739
5740 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
5741 {
5742         struct inode *inode = file_inode(file);
5743         struct btrfs_root *root = BTRFS_I(inode)->root;
5744         struct btrfs_item *item;
5745         struct btrfs_dir_item *di;
5746         struct btrfs_key key;
5747         struct btrfs_key found_key;
5748         struct btrfs_path *path;
5749         struct list_head ins_list;
5750         struct list_head del_list;
5751         int ret;
5752         struct extent_buffer *leaf;
5753         int slot;
5754         unsigned char d_type;
5755         int over = 0;
5756         u32 di_cur;
5757         u32 di_total;
5758         u32 di_len;
5759         int key_type = BTRFS_DIR_INDEX_KEY;
5760         char tmp_name[32];
5761         char *name_ptr;
5762         int name_len;
5763         int is_curr = 0;        /* ctx->pos points to the current index? */
5764
5765         /* FIXME, use a real flag for deciding about the key type */
5766         if (root->fs_info->tree_root == root)
5767                 key_type = BTRFS_DIR_ITEM_KEY;
5768
5769         if (!dir_emit_dots(file, ctx))
5770                 return 0;
5771
5772         path = btrfs_alloc_path();
5773         if (!path)
5774                 return -ENOMEM;
5775
5776         path->reada = 1;
5777
5778         if (key_type == BTRFS_DIR_INDEX_KEY) {
5779                 INIT_LIST_HEAD(&ins_list);
5780                 INIT_LIST_HEAD(&del_list);
5781                 btrfs_get_delayed_items(inode, &ins_list, &del_list);
5782         }
5783
5784         key.type = key_type;
5785         key.offset = ctx->pos;
5786         key.objectid = btrfs_ino(inode);
5787
5788         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5789         if (ret < 0)
5790                 goto err;
5791
5792         while (1) {
5793                 leaf = path->nodes[0];
5794                 slot = path->slots[0];
5795                 if (slot >= btrfs_header_nritems(leaf)) {
5796                         ret = btrfs_next_leaf(root, path);
5797                         if (ret < 0)
5798                                 goto err;
5799                         else if (ret > 0)
5800                                 break;
5801                         continue;
5802                 }
5803
5804                 item = btrfs_item_nr(slot);
5805                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5806
5807                 if (found_key.objectid != key.objectid)
5808                         break;
5809                 if (found_key.type != key_type)
5810                         break;
5811                 if (found_key.offset < ctx->pos)
5812                         goto next;
5813                 if (key_type == BTRFS_DIR_INDEX_KEY &&
5814                     btrfs_should_delete_dir_index(&del_list,
5815                                                   found_key.offset))
5816                         goto next;
5817
5818                 ctx->pos = found_key.offset;
5819                 is_curr = 1;
5820
5821                 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
5822                 di_cur = 0;
5823                 di_total = btrfs_item_size(leaf, item);
5824
5825                 while (di_cur < di_total) {
5826                         struct btrfs_key location;
5827
5828                         if (verify_dir_item(root, leaf, di))
5829                                 break;
5830
5831                         name_len = btrfs_dir_name_len(leaf, di);
5832                         if (name_len <= sizeof(tmp_name)) {
5833                                 name_ptr = tmp_name;
5834                         } else {
5835                                 name_ptr = kmalloc(name_len, GFP_NOFS);
5836                                 if (!name_ptr) {
5837                                         ret = -ENOMEM;
5838                                         goto err;
5839                                 }
5840                         }
5841                         read_extent_buffer(leaf, name_ptr,
5842                                            (unsigned long)(di + 1), name_len);
5843
5844                         d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
5845                         btrfs_dir_item_key_to_cpu(leaf, di, &location);
5846
5847
5848                         /* is this a reference to our own snapshot? If so
5849                          * skip it.
5850                          *
5851                          * In contrast to old kernels, we insert the snapshot's
5852                          * dir item and dir index after it has been created, so
5853                          * we won't find a reference to our own snapshot. We
5854                          * still keep the following code for backward
5855                          * compatibility.
5856                          */
5857                         if (location.type == BTRFS_ROOT_ITEM_KEY &&
5858                             location.objectid == root->root_key.objectid) {
5859                                 over = 0;
5860                                 goto skip;
5861                         }
5862                         over = !dir_emit(ctx, name_ptr, name_len,
5863                                        location.objectid, d_type);
5864
5865 skip:
5866                         if (name_ptr != tmp_name)
5867                                 kfree(name_ptr);
5868
5869                         if (over)
5870                                 goto nopos;
5871                         di_len = btrfs_dir_name_len(leaf, di) +
5872                                  btrfs_dir_data_len(leaf, di) + sizeof(*di);
5873                         di_cur += di_len;
5874                         di = (struct btrfs_dir_item *)((char *)di + di_len);
5875                 }
5876 next:
5877                 path->slots[0]++;
5878         }
5879
5880         if (key_type == BTRFS_DIR_INDEX_KEY) {
5881                 if (is_curr)
5882                         ctx->pos++;
5883                 ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
5884                 if (ret)
5885                         goto nopos;
5886         }
5887
5888         /* Reached end of directory/root. Bump pos past the last item. */
5889         ctx->pos++;
5890
5891         /*
5892          * Stop new entries from being returned after we return the last
5893          * entry.
5894          *
5895          * New directory entries are assigned a strictly increasing
5896          * offset.  This means that new entries created during readdir
5897          * are *guaranteed* to be seen in the future by that readdir.
5898          * This has broken buggy programs which operate on names as
5899          * they're returned by readdir.  Until we re-use freed offsets
5900          * we have this hack to stop new entries from being returned
5901          * under the assumption that they'll never reach this huge
5902          * offset.
5903          *
5904          * This is being careful not to overflow 32bit loff_t unless the
5905          * last entry requires it because doing so has broken 32bit apps
5906          * in the past.
5907          */
5908         if (key_type == BTRFS_DIR_INDEX_KEY) {
5909                 if (ctx->pos >= INT_MAX)
5910                         ctx->pos = LLONG_MAX;
5911                 else
5912                         ctx->pos = INT_MAX;
5913         }
5914 nopos:
5915         ret = 0;
5916 err:
5917         if (key_type == BTRFS_DIR_INDEX_KEY)
5918                 btrfs_put_delayed_items(&ins_list, &del_list);
5919         btrfs_free_path(path);
5920         return ret;
5921 }
5922
5923 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
5924 {
5925         struct btrfs_root *root = BTRFS_I(inode)->root;
5926         struct btrfs_trans_handle *trans;
5927         int ret = 0;
5928         bool nolock = false;
5929
5930         if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
5931                 return 0;
5932
5933         if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(inode))
5934                 nolock = true;
5935
5936         if (wbc->sync_mode == WB_SYNC_ALL) {
5937                 if (nolock)
5938                         trans = btrfs_join_transaction_nolock(root);
5939                 else
5940                         trans = btrfs_join_transaction(root);
5941                 if (IS_ERR(trans))
5942                         return PTR_ERR(trans);
5943                 ret = btrfs_commit_transaction(trans, root);
5944         }
5945         return ret;
5946 }
5947
5948 /*
5949  * This is somewhat expensive, updating the tree every time the
5950  * inode changes.  But, it is most likely to find the inode in cache.
5951  * FIXME: needs more benchmarking... there are no reasons other than performance
5952  * to keep or drop this code.
5953  */
5954 static int btrfs_dirty_inode(struct inode *inode)
5955 {
5956         struct btrfs_root *root = BTRFS_I(inode)->root;
5957         struct btrfs_trans_handle *trans;
5958         int ret;
5959
5960         if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
5961                 return 0;
5962
5963         trans = btrfs_join_transaction(root);
5964         if (IS_ERR(trans))
5965                 return PTR_ERR(trans);
5966
5967         ret = btrfs_update_inode(trans, root, inode);
5968         if (ret == -ENOSPC) {
5969                 /* whoops, lets try again with the full transaction */
5970                 btrfs_end_transaction(trans, root);
5971                 trans = btrfs_start_transaction(root, 1);
5972                 if (IS_ERR(trans))
5973                         return PTR_ERR(trans);
5974
5975                 ret = btrfs_update_inode(trans, root, inode);
5976         }
5977         btrfs_end_transaction(trans, root);
5978         if (BTRFS_I(inode)->delayed_node)
5979                 btrfs_balance_delayed_items(root);
5980
5981         return ret;
5982 }
5983
5984 /*
5985  * This is a copy of file_update_time.  We need this so we can return error on
5986  * ENOSPC for updating the inode in the case of file write and mmap writes.
5987  */
5988 static int btrfs_update_time(struct inode *inode, struct timespec *now,
5989                              int flags)
5990 {
5991         struct btrfs_root *root = BTRFS_I(inode)->root;
5992
5993         if (btrfs_root_readonly(root))
5994                 return -EROFS;
5995
5996         if (flags & S_VERSION)
5997                 inode_inc_iversion(inode);
5998         if (flags & S_CTIME)
5999                 inode->i_ctime = *now;
6000         if (flags & S_MTIME)
6001                 inode->i_mtime = *now;
6002         if (flags & S_ATIME)
6003                 inode->i_atime = *now;
6004         return btrfs_dirty_inode(inode);
6005 }
6006
6007 /*
6008  * find the highest existing sequence number in a directory
6009  * and then set the in-memory index_cnt variable to reflect
6010  * free sequence numbers
6011  */
6012 static int btrfs_set_inode_index_count(struct inode *inode)
6013 {
6014         struct btrfs_root *root = BTRFS_I(inode)->root;
6015         struct btrfs_key key, found_key;
6016         struct btrfs_path *path;
6017         struct extent_buffer *leaf;
6018         int ret;
6019
6020         key.objectid = btrfs_ino(inode);
6021         key.type = BTRFS_DIR_INDEX_KEY;
6022         key.offset = (u64)-1;
6023
6024         path = btrfs_alloc_path();
6025         if (!path)
6026                 return -ENOMEM;
6027
6028         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6029         if (ret < 0)
6030                 goto out;
6031         /* FIXME: we should be able to handle this */
6032         if (ret == 0)
6033                 goto out;
6034         ret = 0;
6035
6036         /*
6037          * MAGIC NUMBER EXPLANATION:
6038          * since we search a directory based on f_pos, and '.' and '..'
6039          * have f_pos of 0 and 1 respectively, everybody else has to
6040          * start at 2
6041          */
6042         if (path->slots[0] == 0) {
6043                 BTRFS_I(inode)->index_cnt = 2;
6044                 goto out;
6045         }
6046
6047         path->slots[0]--;
6048
6049         leaf = path->nodes[0];
6050         btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6051
6052         if (found_key.objectid != btrfs_ino(inode) ||
6053             found_key.type != BTRFS_DIR_INDEX_KEY) {
6054                 BTRFS_I(inode)->index_cnt = 2;
6055                 goto out;
6056         }
6057
6058         BTRFS_I(inode)->index_cnt = found_key.offset + 1;
6059 out:
6060         btrfs_free_path(path);
6061         return ret;
6062 }
6063
6064 /*
6065  * helper to find a free sequence number in a given directory.  The current
6066  * code is very simple; later versions will do smarter things in the btree
6067  */
6068 int btrfs_set_inode_index(struct inode *dir, u64 *index)
6069 {
6070         int ret = 0;
6071
6072         if (BTRFS_I(dir)->index_cnt == (u64)-1) {
6073                 ret = btrfs_inode_delayed_dir_index_count(dir);
6074                 if (ret) {
6075                         ret = btrfs_set_inode_index_count(dir);
6076                         if (ret)
6077                                 return ret;
6078                 }
6079         }
6080
6081         *index = BTRFS_I(dir)->index_cnt;
6082         BTRFS_I(dir)->index_cnt++;
6083
6084         return ret;
6085 }
6086
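/*
 * Hash the freshly created inode into the VFS inode cache, using the same hash
 * and comparison callback as btrfs_iget_locked() so later lookups can find it.
 */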
6087 static int btrfs_insert_inode_locked(struct inode *inode)
6088 {
6089         struct btrfs_iget_args args;
6090         args.location = &BTRFS_I(inode)->location;
6091         args.root = BTRFS_I(inode)->root;
6092
6093         return insert_inode_locked4(inode,
6094                    btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
6095                    btrfs_find_actor, &args);
6096 }
6097
6098 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
6099                                      struct btrfs_root *root,
6100                                      struct inode *dir,
6101                                      const char *name, int name_len,
6102                                      u64 ref_objectid, u64 objectid,
6103                                      umode_t mode, u64 *index)
6104 {
6105         struct inode *inode;
6106         struct btrfs_inode_item *inode_item;
6107         struct btrfs_key *location;
6108         struct btrfs_path *path;
6109         struct btrfs_inode_ref *ref;
6110         struct btrfs_key key[2];
6111         u32 sizes[2];
6112         int nitems = name ? 2 : 1;
6113         unsigned long ptr;
6114         int ret;
6115
6116         path = btrfs_alloc_path();
6117         if (!path)
6118                 return ERR_PTR(-ENOMEM);
6119
6120         inode = new_inode(root->fs_info->sb);
6121         if (!inode) {
6122                 btrfs_free_path(path);
6123                 return ERR_PTR(-ENOMEM);
6124         }
6125
6126         /*
6127          * For O_TMPFILE (no name is given), set the link count to 0 so that
6128          * from this point on we fill in an inode item with the correct link count.
6129          */
6130         if (!name)
6131                 set_nlink(inode, 0);
6132
6133         /*
6134          * we have to initialize this early, so we can reclaim the inode
6135          * number if we fail afterwards in this function.
6136          */
6137         inode->i_ino = objectid;
6138
6139         if (dir && name) {
6140                 trace_btrfs_inode_request(dir);
6141
6142                 ret = btrfs_set_inode_index(dir, index);
6143                 if (ret) {
6144                         btrfs_free_path(path);
6145                         iput(inode);
6146                         return ERR_PTR(ret);
6147                 }
6148         } else if (dir) {
6149                 *index = 0;
6150         }
6151         /*
6152          * index_cnt is ignored for everything but a dir;
6153          * btrfs_set_inode_index_count() has an explanation for the magic
6154          * number
6155          */
6156         BTRFS_I(inode)->index_cnt = 2;
6157         BTRFS_I(inode)->dir_index = *index;
6158         BTRFS_I(inode)->root = root;
6159         BTRFS_I(inode)->generation = trans->transid;
6160         inode->i_generation = BTRFS_I(inode)->generation;
6161
6162         /*
6163          * We could have gotten an inode number from somebody who was fsynced
6164          * and then removed in this same transaction, so let's just set full
6165          * sync since it will be a full sync anyway and this will blow away the
6166          * old info in the log.
6167          */
6168         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
6169
6170         key[0].objectid = objectid;
6171         key[0].type = BTRFS_INODE_ITEM_KEY;
6172         key[0].offset = 0;
6173
6174         sizes[0] = sizeof(struct btrfs_inode_item);
6175
6176         if (name) {
6177                 /*
6178                  * Start new inodes with an inode_ref. This is slightly more
6179                  * efficient for small numbers of hard links since they will
6180                  * be packed into one item. Extended refs will kick in if we
6181                  * add more hard links than can fit in the ref item.
6182                  */
6183                 key[1].objectid = objectid;
6184                 key[1].type = BTRFS_INODE_REF_KEY;
6185                 key[1].offset = ref_objectid;
6186
6187                 sizes[1] = name_len + sizeof(*ref);
6188         }
6189
6190         location = &BTRFS_I(inode)->location;
6191         location->objectid = objectid;
6192         location->offset = 0;
6193         location->type = BTRFS_INODE_ITEM_KEY;
6194
6195         ret = btrfs_insert_inode_locked(inode);
6196         if (ret < 0)
6197                 goto fail;
6198
6199         path->leave_spinning = 1;
6200         ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems);
6201         if (ret != 0)
6202                 goto fail_unlock;
6203
6204         inode_init_owner(inode, dir, mode);
6205         inode_set_bytes(inode, 0);
6206
6207         inode->i_mtime = CURRENT_TIME;
6208         inode->i_atime = inode->i_mtime;
6209         inode->i_ctime = inode->i_mtime;
6210         BTRFS_I(inode)->i_otime = inode->i_mtime;
6211
6212         inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
6213                                   struct btrfs_inode_item);
6214         memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item,
6215                              sizeof(*inode_item));
6216         fill_inode_item(trans, path->nodes[0], inode_item, inode);
6217
6218         if (name) {
6219                 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
6220                                      struct btrfs_inode_ref);
6221                 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
6222                 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
6223                 ptr = (unsigned long)(ref + 1);
6224                 write_extent_buffer(path->nodes[0], name, ptr, name_len);
6225         }
6226
6227         btrfs_mark_buffer_dirty(path->nodes[0]);
6228         btrfs_free_path(path);
6229
6230         btrfs_inherit_iflags(inode, dir);
6231
6232         if (S_ISREG(mode)) {
6233                 if (btrfs_test_opt(root, NODATASUM))
6234                         BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6235                 if (btrfs_test_opt(root, NODATACOW))
6236                         BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
6237                                 BTRFS_INODE_NODATASUM;
6238         }
6239
6240         inode_tree_add(inode);
6241
6242         trace_btrfs_inode_new(inode);
6243         btrfs_set_inode_last_trans(trans, inode);
6244
6245         btrfs_update_root_times(trans, root);
6246
6247         ret = btrfs_inode_inherit_props(trans, inode, dir);
6248         if (ret)
6249                 btrfs_err(root->fs_info,
6250                           "error inheriting props for ino %llu (root %llu): %d",
6251                           btrfs_ino(inode), root->root_key.objectid, ret);
6252
6253         return inode;
6254
6255 fail_unlock:
6256         unlock_new_inode(inode);
6257 fail:
6258         if (dir && name)
6259                 BTRFS_I(dir)->index_cnt--;
6260         btrfs_free_path(path);
6261         iput(inode);
6262         return ERR_PTR(ret);
6263 }
6264
6265 static inline u8 btrfs_inode_type(struct inode *inode)
6266 {
6267         return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
6268 }
6269
6270 /*
6271  * utility function to add 'inode' into 'parent_inode' with
6272  * a given name and a given sequence number.
6273  * if 'add_backref' is true, also insert a backref from the
6274  * inode to the parent directory.
6275  */
6276 int btrfs_add_link(struct btrfs_trans_handle *trans,
6277                    struct inode *parent_inode, struct inode *inode,
6278                    const char *name, int name_len, int add_backref, u64 index)
6279 {
6280         int ret = 0;
6281         struct btrfs_key key;
6282         struct btrfs_root *root = BTRFS_I(parent_inode)->root;
6283         u64 ino = btrfs_ino(inode);
6284         u64 parent_ino = btrfs_ino(parent_inode);
6285
6286         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6287                 memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
6288         } else {
6289                 key.objectid = ino;
6290                 key.type = BTRFS_INODE_ITEM_KEY;
6291                 key.offset = 0;
6292         }
6293
6294         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6295                 ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
6296                                          key.objectid, root->root_key.objectid,
6297                                          parent_ino, index, name, name_len);
6298         } else if (add_backref) {
6299                 ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
6300                                              parent_ino, index);
6301         }
6302
6303         /* Nothing to clean up yet */
6304         if (ret)
6305                 return ret;
6306
6307         ret = btrfs_insert_dir_item(trans, root, name, name_len,
6308                                     parent_inode, &key,
6309                                     btrfs_inode_type(inode), index);
6310         if (ret == -EEXIST || ret == -EOVERFLOW)
6311                 goto fail_dir_item;
6312         else if (ret) {
6313                 btrfs_abort_transaction(trans, root, ret);
6314                 return ret;
6315         }
6316
6317         btrfs_i_size_write(parent_inode, parent_inode->i_size +
6318                            name_len * 2);
6319         inode_inc_iversion(parent_inode);
6320         parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
6321         ret = btrfs_update_inode(trans, root, parent_inode);
6322         if (ret)
6323                 btrfs_abort_transaction(trans, root, ret);
6324         return ret;
6325
6326 fail_dir_item:
6327         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6328                 u64 local_index;
6329                 int err;
6330                 err = btrfs_del_root_ref(trans, root->fs_info->tree_root,
6331                                  key.objectid, root->root_key.objectid,
6332                                  parent_ino, &local_index, name, name_len);
6333
6334         } else if (add_backref) {
6335                 u64 local_index;
6336                 int err;
6337
6338                 err = btrfs_del_inode_ref(trans, root, name, name_len,
6339                                           ino, parent_ino, &local_index);
6340         }
6341         return ret;
6342 }
6343
6344 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
6345                             struct inode *dir, struct dentry *dentry,
6346                             struct inode *inode, int backref, u64 index)
6347 {
6348         int err = btrfs_add_link(trans, dir, inode,
6349                                  dentry->d_name.name, dentry->d_name.len,
6350                                  backref, index);
6351         if (err > 0)
6352                 err = -EEXIST;
6353         return err;
6354 }
6355
6356 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
6357                         umode_t mode, dev_t rdev)
6358 {
6359         struct btrfs_trans_handle *trans;
6360         struct btrfs_root *root = BTRFS_I(dir)->root;
6361         struct inode *inode = NULL;
6362         int err;
6363         int drop_inode = 0;
6364         u64 objectid;
6365         u64 index = 0;
6366
6367         if (!new_valid_dev(rdev))
6368                 return -EINVAL;
6369
6370         /*
6371          * 2 for inode item and ref
6372          * 2 for dir items
6373          * 1 for xattr if selinux is on
6374          */
6375         trans = btrfs_start_transaction(root, 5);
6376         if (IS_ERR(trans))
6377                 return PTR_ERR(trans);
6378
6379         err = btrfs_find_free_ino(root, &objectid);
6380         if (err)
6381                 goto out_unlock;
6382
6383         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6384                                 dentry->d_name.len, btrfs_ino(dir), objectid,
6385                                 mode, &index);
6386         if (IS_ERR(inode)) {
6387                 err = PTR_ERR(inode);
6388                 goto out_unlock;
6389         }
6390
6391         /*
6392          * If the active LSM wants to access the inode during
6393          * d_instantiate it needs these. Smack checks to see
6394          * if the filesystem supports xattrs by looking at the
6395          * ops vector.
6396          */
6397         inode->i_op = &btrfs_special_inode_operations;
6398         init_special_inode(inode, inode->i_mode, rdev);
6399
6400         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6401         if (err)
6402                 goto out_unlock_inode;
6403
6404         err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
6405         if (err) {
6406                 goto out_unlock_inode;
6407         } else {
6408                 btrfs_update_inode(trans, root, inode);
6409                 unlock_new_inode(inode);
6410                 d_instantiate(dentry, inode);
6411         }
6412
6413 out_unlock:
6414         btrfs_end_transaction(trans, root);
6415         btrfs_balance_delayed_items(root);
6416         btrfs_btree_balance_dirty(root);
6417         if (drop_inode) {
6418                 inode_dec_link_count(inode);
6419                 iput(inode);
6420         }
6421         return err;
6422
6423 out_unlock_inode:
6424         drop_inode = 1;
6425         unlock_new_inode(inode);
6426         goto out_unlock;
6427
6428 }
6429
6430 static int btrfs_create(struct inode *dir, struct dentry *dentry,
6431                         umode_t mode, bool excl)
6432 {
6433         struct btrfs_trans_handle *trans;
6434         struct btrfs_root *root = BTRFS_I(dir)->root;
6435         struct inode *inode = NULL;
6436         int drop_inode_on_err = 0;
6437         int err;
6438         u64 objectid;
6439         u64 index = 0;
6440
6441         /*
6442          * 2 for inode item and ref
6443          * 2 for dir items
6444          * 1 for xattr if selinux is on
6445          */
6446         trans = btrfs_start_transaction(root, 5);
6447         if (IS_ERR(trans))
6448                 return PTR_ERR(trans);
6449
6450         err = btrfs_find_free_ino(root, &objectid);
6451         if (err)
6452                 goto out_unlock;
6453
6454         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6455                                 dentry->d_name.len, btrfs_ino(dir), objectid,
6456                                 mode, &index);
6457         if (IS_ERR(inode)) {
6458                 err = PTR_ERR(inode);
6459                 goto out_unlock;
6460         }
6461         drop_inode_on_err = 1;
6462         /*
6463          * If the active LSM wants to access the inode during
6464          * d_instantiate it needs these. Smack checks to see
6465          * if the filesystem supports xattrs by looking at the
6466          * ops vector.
6467          */
6468         inode->i_fop = &btrfs_file_operations;
6469         inode->i_op = &btrfs_file_inode_operations;
6470         inode->i_mapping->a_ops = &btrfs_aops;
6471
6472         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6473         if (err)
6474                 goto out_unlock_inode;
6475
6476         err = btrfs_update_inode(trans, root, inode);
6477         if (err)
6478                 goto out_unlock_inode;
6479
6480         err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
6481         if (err)
6482                 goto out_unlock_inode;
6483
6484         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
6485         unlock_new_inode(inode);
6486         d_instantiate(dentry, inode);
6487
6488 out_unlock:
6489         btrfs_end_transaction(trans, root);
6490         if (err && drop_inode_on_err) {
6491                 inode_dec_link_count(inode);
6492                 iput(inode);
6493         }
6494         btrfs_balance_delayed_items(root);
6495         btrfs_btree_balance_dirty(root);
6496         return err;
6497
6498 out_unlock_inode:
6499         unlock_new_inode(inode);
6500         goto out_unlock;
6501
6502 }
6503
6504 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
6505                       struct dentry *dentry)
6506 {
6507         struct btrfs_trans_handle *trans;
6508         struct btrfs_root *root = BTRFS_I(dir)->root;
6509         struct inode *inode = d_inode(old_dentry);
6510         u64 index;
6511         int err;
6512         int drop_inode = 0;
6513
6514         /* do not allow sys_link across subvolumes of the same device */
6515         if (root->objectid != BTRFS_I(inode)->root->objectid)
6516                 return -EXDEV;
6517
6518         if (inode->i_nlink >= BTRFS_LINK_MAX)
6519                 return -EMLINK;
6520
6521         err = btrfs_set_inode_index(dir, &index);
6522         if (err)
6523                 goto fail;
6524
6525         /*
6526          * 2 items for inode and inode ref
6527          * 2 items for dir items
6528          * 1 item for parent inode
6529          */
6530         trans = btrfs_start_transaction(root, 5);
6531         if (IS_ERR(trans)) {
6532                 err = PTR_ERR(trans);
6533                 goto fail;
6534         }
6535
6536         /* There are several dir indexes for this inode, clear the cache. */
6537         BTRFS_I(inode)->dir_index = 0ULL;
6538         inc_nlink(inode);
6539         inode_inc_iversion(inode);
6540         inode->i_ctime = CURRENT_TIME;
6541         ihold(inode);
6542         set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
6543
6544         err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
6545
6546         if (err) {
6547                 drop_inode = 1;
6548         } else {
6549                 struct dentry *parent = dentry->d_parent;
6550                 err = btrfs_update_inode(trans, root, inode);
6551                 if (err)
6552                         goto fail;
6553                 if (inode->i_nlink == 1) {
6554                         /*
6555                          * If new hard link count is 1, it's a file created
6556                          * with open(2) O_TMPFILE flag.
6557                          */
6558                         err = btrfs_orphan_del(trans, inode);
6559                         if (err)
6560                                 goto fail;
6561                 }
6562                 d_instantiate(dentry, inode);
6563                 btrfs_log_new_name(trans, inode, NULL, parent);
6564         }
6565
6566         btrfs_end_transaction(trans, root);
6567         btrfs_balance_delayed_items(root);
6568 fail:
6569         if (drop_inode) {
6570                 inode_dec_link_count(inode);
6571                 iput(inode);
6572         }
6573         btrfs_btree_balance_dirty(root);
6574         return err;
6575 }
6576
6577 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
6578 {
6579         struct inode *inode = NULL;
6580         struct btrfs_trans_handle *trans;
6581         struct btrfs_root *root = BTRFS_I(dir)->root;
6582         int err = 0;
6583         int drop_on_err = 0;
6584         u64 objectid = 0;
6585         u64 index = 0;
6586
6587         /*
6588          * 2 items for inode and ref
6589          * 2 items for dir items
6590          * 1 for xattr if selinux is on
6591          */
6592         trans = btrfs_start_transaction(root, 5);
6593         if (IS_ERR(trans))
6594                 return PTR_ERR(trans);
6595
6596         err = btrfs_find_free_ino(root, &objectid);
6597         if (err)
6598                 goto out_fail;
6599
6600         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6601                                 dentry->d_name.len, btrfs_ino(dir), objectid,
6602                                 S_IFDIR | mode, &index);
6603         if (IS_ERR(inode)) {
6604                 err = PTR_ERR(inode);
6605                 goto out_fail;
6606         }
6607
6608         drop_on_err = 1;
6609         /* these must be set before we unlock the inode */
6610         inode->i_op = &btrfs_dir_inode_operations;
6611         inode->i_fop = &btrfs_dir_file_operations;
6612
6613         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6614         if (err)
6615                 goto out_fail_inode;
6616
6617         btrfs_i_size_write(inode, 0);
6618         err = btrfs_update_inode(trans, root, inode);
6619         if (err)
6620                 goto out_fail_inode;
6621
6622         err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
6623                              dentry->d_name.len, 0, index);
6624         if (err)
6625                 goto out_fail_inode;
6626
6627         d_instantiate(dentry, inode);
6628         /*
6629          * mkdir is special.  We're unlocking after we call d_instantiate
6630          * to avoid a race with nfsd calling d_instantiate.
6631          */
6632         unlock_new_inode(inode);
6633         drop_on_err = 0;
6634
6635 out_fail:
6636         btrfs_end_transaction(trans, root);
6637         if (drop_on_err) {
6638                 inode_dec_link_count(inode);
6639                 iput(inode);
6640         }
6641         btrfs_balance_delayed_items(root);
6642         btrfs_btree_balance_dirty(root);
6643         return err;
6644
6645 out_fail_inode:
6646         unlock_new_inode(inode);
6647         goto out_fail;
6648 }
6649
6650 /* Find next extent map of a given extent map, caller needs to ensure locks */
6651 static struct extent_map *next_extent_map(struct extent_map *em)
6652 {
6653         struct rb_node *next;
6654
6655         next = rb_next(&em->rb_node);
6656         if (!next)
6657                 return NULL;
6658         return container_of(next, struct extent_map, rb_node);
6659 }
6660
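/* Find previous extent map of a given extent map, caller needs to ensure locks */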
6661 static struct extent_map *prev_extent_map(struct extent_map *em)
6662 {
6663         struct rb_node *prev;
6664
6665         prev = rb_prev(&em->rb_node);
6666         if (!prev)
6667                 return NULL;
6668         return container_of(prev, struct extent_map, rb_node);
6669 }
6670
6671 /* helper for btrfs_get_extent.  Given an existing extent in the tree
6672  * (the existing extent is the nearest one to map_start) and an extent
6673  * that you want to insert, deal with the overlap and insert the
6674  * best-fitting new extent into the tree.
6675  */
6676 static int merge_extent_mapping(struct extent_map_tree *em_tree,
6677                                 struct extent_map *existing,
6678                                 struct extent_map *em,
6679                                 u64 map_start)
6680 {
6681         struct extent_map *prev;
6682         struct extent_map *next;
6683         u64 start;
6684         u64 end;
6685         u64 start_diff;
6686
6687         BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
6688
6689         if (existing->start > map_start) {
6690                 next = existing;
6691                 prev = prev_extent_map(next);
6692         } else {
6693                 prev = existing;
6694                 next = next_extent_map(prev);
6695         }
6696
6697         start = prev ? extent_map_end(prev) : em->start;
6698         start = max_t(u64, start, em->start);
6699         end = next ? next->start : extent_map_end(em);
6700         end = min_t(u64, end, extent_map_end(em));
6701         start_diff = start - em->start;
6702         em->start = start;
6703         em->len = end - start;
6704         if (em->block_start < EXTENT_MAP_LAST_BYTE &&
6705             !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
6706                 em->block_start += start_diff;
6707                 em->block_len -= start_diff;
6708         }
6709         return add_extent_mapping(em_tree, em, 0);
6710 }
6711
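/*
 * Read the data of a compressed inline extent out of the leaf and
 * decompress it into the given page.
 */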
6712 static noinline int uncompress_inline(struct btrfs_path *path,
6713                                       struct inode *inode, struct page *page,
6714                                       size_t pg_offset, u64 extent_offset,
6715                                       struct btrfs_file_extent_item *item)
6716 {
6717         int ret;
6718         struct extent_buffer *leaf = path->nodes[0];
6719         char *tmp;
6720         size_t max_size;
6721         unsigned long inline_size;
6722         unsigned long ptr;
6723         int compress_type;
6724
6725         WARN_ON(pg_offset != 0);
6726         compress_type = btrfs_file_extent_compression(leaf, item);
6727         max_size = btrfs_file_extent_ram_bytes(leaf, item);
6728         inline_size = btrfs_file_extent_inline_item_len(leaf,
6729                                         btrfs_item_nr(path->slots[0]));
6730         tmp = kmalloc(inline_size, GFP_NOFS);
6731         if (!tmp)
6732                 return -ENOMEM;
6733         ptr = btrfs_file_extent_inline_start(item);
6734
6735         read_extent_buffer(leaf, tmp, ptr, inline_size);
6736
6737         max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
6738         ret = btrfs_decompress(compress_type, tmp, page,
6739                                extent_offset, inline_size, max_size);
6740         kfree(tmp);
6741         return ret;
6742 }
6743
6744 /*
6745  * a bit scary, this does extent mapping from logical file offset to the disk.
6746  * the ugly parts come from merging extents from the disk with the in-ram
6747  * representation.  This gets more complex because of the data=ordered code,
6748  * where the in-ram extents might be locked pending data=ordered completion.
6749  *
6750  * This also copies inline extents directly into the page.
6751  */
6752
6753 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
6754                                     size_t pg_offset, u64 start, u64 len,
6755                                     int create)
6756 {
6757         int ret;
6758         int err = 0;
6759         u64 extent_start = 0;
6760         u64 extent_end = 0;
6761         u64 objectid = btrfs_ino(inode);
6762         u32 found_type;
6763         struct btrfs_path *path = NULL;
6764         struct btrfs_root *root = BTRFS_I(inode)->root;
6765         struct btrfs_file_extent_item *item;
6766         struct extent_buffer *leaf;
6767         struct btrfs_key found_key;
6768         struct extent_map *em = NULL;
6769         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
6770         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
6771         struct btrfs_trans_handle *trans = NULL;
6772         const bool new_inline = !page || create;
6773
6774 again:
6775         read_lock(&em_tree->lock);
6776         em = lookup_extent_mapping(em_tree, start, len);
6777         if (em)
6778                 em->bdev = root->fs_info->fs_devices->latest_bdev;
6779         read_unlock(&em_tree->lock);
6780
6781         if (em) {
6782                 if (em->start > start || em->start + em->len <= start)
6783                         free_extent_map(em);
6784                 else if (em->block_start == EXTENT_MAP_INLINE && page)
6785                         free_extent_map(em);
6786                 else
6787                         goto out;
6788         }
6789         em = alloc_extent_map();
6790         if (!em) {
6791                 err = -ENOMEM;
6792                 goto out;
6793         }
6794         em->bdev = root->fs_info->fs_devices->latest_bdev;
6795         em->start = EXTENT_MAP_HOLE;
6796         em->orig_start = EXTENT_MAP_HOLE;
6797         em->len = (u64)-1;
6798         em->block_len = (u64)-1;
6799
6800         if (!path) {
6801                 path = btrfs_alloc_path();
6802                 if (!path) {
6803                         err = -ENOMEM;
6804                         goto out;
6805                 }
6806                 /*
6807                  * Chances are we'll be called again, so go ahead and do
6808                  * readahead
6809                  */
6810                 path->reada = 1;
6811         }
6812
6813         ret = btrfs_lookup_file_extent(trans, root, path,
6814                                        objectid, start, trans != NULL);
6815         if (ret < 0) {
6816                 err = ret;
6817                 goto out;
6818         }
6819
6820         if (ret != 0) {
6821                 if (path->slots[0] == 0)
6822                         goto not_found;
6823                 path->slots[0]--;
6824         }
6825
6826         leaf = path->nodes[0];
6827         item = btrfs_item_ptr(leaf, path->slots[0],
6828                               struct btrfs_file_extent_item);
6829         /* are we inside the extent that was found? */
6830         btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6831         found_type = found_key.type;
6832         if (found_key.objectid != objectid ||
6833             found_type != BTRFS_EXTENT_DATA_KEY) {
6834                 /*
6835                  * If we back up past the first extent we want to move forward
6836                  * and see if there is an extent in front of us, otherwise we'll
6837                  * say there is a hole for our whole search range which can
6838                  * cause problems.
6839                  */
6840                 extent_end = start;
6841                 goto next;
6842         }
6843
6844         found_type = btrfs_file_extent_type(leaf, item);
6845         extent_start = found_key.offset;
6846         if (found_type == BTRFS_FILE_EXTENT_REG ||
6847             found_type == BTRFS_FILE_EXTENT_PREALLOC) {
6848                 extent_end = extent_start +
6849                        btrfs_file_extent_num_bytes(leaf, item);
6850         } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
6851                 size_t size;
6852                 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
6853                 extent_end = ALIGN(extent_start + size, root->sectorsize);
6854         }
6855 next:
6856         if (start >= extent_end) {
6857                 path->slots[0]++;
6858                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
6859                         ret = btrfs_next_leaf(root, path);
6860                         if (ret < 0) {
6861                                 err = ret;
6862                                 goto out;
6863                         }
6864                         if (ret > 0)
6865                                 goto not_found;
6866                         leaf = path->nodes[0];
6867                 }
6868                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6869                 if (found_key.objectid != objectid ||
6870                     found_key.type != BTRFS_EXTENT_DATA_KEY)
6871                         goto not_found;
6872                 if (start + len <= found_key.offset)
6873                         goto not_found;
6874                 if (start > found_key.offset)
6875                         goto next;
6876                 em->start = start;
6877                 em->orig_start = start;
6878                 em->len = found_key.offset - start;
6879                 goto not_found_em;
6880         }
6881
6882         btrfs_extent_item_to_extent_map(inode, path, item, new_inline, em);
6883
6884         if (found_type == BTRFS_FILE_EXTENT_REG ||
6885             found_type == BTRFS_FILE_EXTENT_PREALLOC) {
6886                 goto insert;
6887         } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
6888                 unsigned long ptr;
6889                 char *map;
6890                 size_t size;
6891                 size_t extent_offset;
6892                 size_t copy_size;
6893
6894                 if (new_inline)
6895                         goto out;
6896
6897                 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
6898                 extent_offset = page_offset(page) + pg_offset - extent_start;
6899                 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
6900                                 size - extent_offset);
6901                 em->start = extent_start + extent_offset;
6902                 em->len = ALIGN(copy_size, root->sectorsize);
6903                 em->orig_block_len = em->len;
6904                 em->orig_start = em->start;
6905                 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
6906                 if (create == 0 && !PageUptodate(page)) {
6907                         if (btrfs_file_extent_compression(leaf, item) !=
6908                             BTRFS_COMPRESS_NONE) {
6909                                 ret = uncompress_inline(path, inode, page,
6910                                                         pg_offset,
6911                                                         extent_offset, item);
6912                                 if (ret) {
6913                                         err = ret;
6914                                         goto out;
6915                                 }
6916                         } else {
6917                                 map = kmap(page);
6918                                 read_extent_buffer(leaf, map + pg_offset, ptr,
6919                                                    copy_size);
6920                                 if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
6921                                         memset(map + pg_offset + copy_size, 0,
6922                                                PAGE_CACHE_SIZE - pg_offset -
6923                                                copy_size);
6924                                 }
6925                                 kunmap(page);
6926                         }
6927                         flush_dcache_page(page);
6928                 } else if (create && PageUptodate(page)) {
6929                         BUG();
6930                         if (!trans) {
6931                                 kunmap(page);
6932                                 free_extent_map(em);
6933                                 em = NULL;
6934
6935                                 btrfs_release_path(path);
6936                                 trans = btrfs_join_transaction(root);
6937
6938                                 if (IS_ERR(trans))
6939                                         return ERR_CAST(trans);
6940                                 goto again;
6941                         }
6942                         map = kmap(page);
6943                         write_extent_buffer(leaf, map + pg_offset, ptr,
6944                                             copy_size);
6945                         kunmap(page);
6946                         btrfs_mark_buffer_dirty(leaf);
6947                 }
6948                 set_extent_uptodate(io_tree, em->start,
6949                                     extent_map_end(em) - 1, NULL, GFP_NOFS);
6950                 goto insert;
6951         }
6952 not_found:
6953         em->start = start;
6954         em->orig_start = start;
6955         em->len = len;
6956 not_found_em:
6957         em->block_start = EXTENT_MAP_HOLE;
6958         set_bit(EXTENT_FLAG_VACANCY, &em->flags);
6959 insert:
6960         btrfs_release_path(path);
6961         if (em->start > start || extent_map_end(em) <= start) {
6962                 btrfs_err(root->fs_info, "bad extent! em: [%llu %llu] passed [%llu %llu]",
6963                         em->start, em->len, start, len);
6964                 err = -EIO;
6965                 goto out;
6966         }
6967
6968         err = 0;
6969         write_lock(&em_tree->lock);
6970         ret = add_extent_mapping(em_tree, em, 0);
6971         /* it is possible that someone inserted the extent into the tree
6972          * while we had the lock dropped.  It is also possible that
6973          * an overlapping map exists in the tree
6974          */
6975         if (ret == -EEXIST) {
6976                 struct extent_map *existing;
6977
6978                 ret = 0;
6979
6980                 existing = search_extent_mapping(em_tree, start, len);
6981                 /*
6982                  * existing will always be non-NULL, since there must be
6983                  * an extent causing the -EEXIST.
6984                  */
6985                 if (start >= extent_map_end(existing) ||
6986                     start <= existing->start) {
6987                         /*
6988                          * The existing extent map is the one nearest to
6989                          * the [start, start + len) range which overlaps
6990                          */
6991                         err = merge_extent_mapping(em_tree, existing,
6992                                                    em, start);
6993                         free_extent_map(existing);
6994                         if (err) {
6995                                 free_extent_map(em);
6996                                 em = NULL;
6997                         }
6998                 } else {
6999                         free_extent_map(em);
7000                         em = existing;
7001                         err = 0;
7002                 }
7003         }
7004         write_unlock(&em_tree->lock);
7005 out:
7006
7007         trace_btrfs_get_extent(root, em);
7008
7009         btrfs_free_path(path);
7010         if (trans) {
7011                 ret = btrfs_end_transaction(trans, root);
7012                 if (!err)
7013                         err = ret;
7014         }
7015         if (err) {
7016                 free_extent_map(em);
7017                 return ERR_PTR(err);
7018         }
7019         BUG_ON(!em); /* Error is always set */
7020         return em;
7021 }
7022
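/*
 * Like btrfs_get_extent(), but if the returned extent is a hole or a
 * prealloc extent, also look for delalloc bytes behind it so fiemap can
 * report ranges that have not been flushed to disk yet.
 */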
7023 struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
7024                                            size_t pg_offset, u64 start, u64 len,
7025                                            int create)
7026 {
7027         struct extent_map *em;
7028         struct extent_map *hole_em = NULL;
7029         u64 range_start = start;
7030         u64 end;
7031         u64 found;
7032         u64 found_end;
7033         int err = 0;
7034
7035         em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
7036         if (IS_ERR(em))
7037                 return em;
7038         if (em) {
7039                 /*
7040                  * if our em maps to
7041                  * -  a hole or
7042                  * -  a pre-alloc extent,
7043                  * there might actually be delalloc bytes behind it.
7044                  */
7045                 if (em->block_start != EXTENT_MAP_HOLE &&
7046                     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7047                         return em;
7048                 else
7049                         hole_em = em;
7050         }
7051
7052         /* check to see if we've wrapped (len == -1 or similar) */
7053         end = start + len;
7054         if (end < start)
7055                 end = (u64)-1;
7056         else
7057                 end -= 1;
7058
7059         em = NULL;
7060
7061         /* ok, we didn't find anything, let's look for delalloc */
7062         found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
7063                                  end, len, EXTENT_DELALLOC, 1);
7064         found_end = range_start + found;
7065         if (found_end < range_start)
7066                 found_end = (u64)-1;
7067
7068         /*
7069          * we didn't find anything useful, return
7070          * the original results from get_extent()
7071          */
7072         if (range_start > end || found_end <= start) {
7073                 em = hole_em;
7074                 hole_em = NULL;
7075                 goto out;
7076         }
7077
7078         /* adjust the range_start to make sure it doesn't
7079          * go backwards from the start they passed in
7080          */
7081         range_start = max(start, range_start);
7082         found = found_end - range_start;
7083
7084         if (found > 0) {
7085                 u64 hole_start = start;
7086                 u64 hole_len = len;
7087
7088                 em = alloc_extent_map();
7089                 if (!em) {
7090                         err = -ENOMEM;
7091                         goto out;
7092                 }
7093                 /*
7094                  * when btrfs_get_extent can't find anything it
7095                  * returns one huge hole
7096                  *
7097                  * make sure what it found really fits our range, and
7098                  * adjust to make sure it is based on the start from
7099                  * the caller
7100                  */
7101                 if (hole_em) {
7102                         u64 calc_end = extent_map_end(hole_em);
7103
7104                         if (calc_end <= start || (hole_em->start > end)) {
7105                                 free_extent_map(hole_em);
7106                                 hole_em = NULL;
7107                         } else {
7108                                 hole_start = max(hole_em->start, start);
7109                                 hole_len = calc_end - hole_start;
7110                         }
7111                 }
7112                 em->bdev = NULL;
7113                 if (hole_em && range_start > hole_start) {
7114                         /* our hole starts before our delalloc, so we
7115                          * have to return just the parts of the hole
7116                          * that go until the delalloc starts
7117                          */
7118                         em->len = min(hole_len,
7119                                       range_start - hole_start);
7120                         em->start = hole_start;
7121                         em->orig_start = hole_start;
7122                         /*
7123                          * don't adjust block start at all,
7124                          * it is fixed at EXTENT_MAP_HOLE
7125                          */
7126                         em->block_start = hole_em->block_start;
7127                         em->block_len = hole_len;
7128                         if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
7129                                 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
7130                 } else {
7131                         em->start = range_start;
7132                         em->len = found;
7133                         em->orig_start = range_start;
7134                         em->block_start = EXTENT_MAP_DELALLOC;
7135                         em->block_len = found;
7136                 }
7137         } else if (hole_em) {
7138                 return hole_em;
7139         }
7140 out:
7141
7142         free_extent_map(hole_em);
7143         if (err) {
7144                 free_extent_map(em);
7145                 return ERR_PTR(err);
7146         }
7147         return em;
7148 }
7149
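/*
 * Reserve a new data extent for direct I/O, insert a pinned extent map
 * for it and create the matching ordered extent.
 */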
7150 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
7151                                                   u64 start, u64 len)
7152 {
7153         struct btrfs_root *root = BTRFS_I(inode)->root;
7154         struct extent_map *em;
7155         struct btrfs_key ins;
7156         u64 alloc_hint;
7157         int ret;
7158
7159         alloc_hint = get_extent_allocation_hint(inode, start, len);
7160         ret = btrfs_reserve_extent(root, len, root->sectorsize, 0,
7161                                    alloc_hint, &ins, 1, 1);
7162         if (ret)
7163                 return ERR_PTR(ret);
7164
7165         em = create_pinned_em(inode, start, ins.offset, start, ins.objectid,
7166                               ins.offset, ins.offset, ins.offset, 0);
7167         if (IS_ERR(em)) {
7168                 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
7169                 return em;
7170         }
7171
7172         ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
7173                                            ins.offset, ins.offset, 0);
7174         if (ret) {
7175                 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
7176                 free_extent_map(em);
7177                 return ERR_PTR(ret);
7178         }
7179
7180         return em;
7181 }
7182
7183 /*
7184  * returns 1 when the nocow is safe, < 0 on error, 0 if the
7185  * block must be cow'd
7186  */
7187 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
7188                               u64 *orig_start, u64 *orig_block_len,
7189                               u64 *ram_bytes)
7190 {
7191         struct btrfs_trans_handle *trans;
7192         struct btrfs_path *path;
7193         int ret;
7194         struct extent_buffer *leaf;
7195         struct btrfs_root *root = BTRFS_I(inode)->root;
7196         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7197         struct btrfs_file_extent_item *fi;
7198         struct btrfs_key key;
7199         u64 disk_bytenr;
7200         u64 backref_offset;
7201         u64 extent_end;
7202         u64 num_bytes;
7203         int slot;
7204         int found_type;
7205         bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW);
7206
7207         path = btrfs_alloc_path();
7208         if (!path)
7209                 return -ENOMEM;
7210
7211         ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
7212                                        offset, 0);
7213         if (ret < 0)
7214                 goto out;
7215
7216         slot = path->slots[0];
7217         if (ret == 1) {
7218                 if (slot == 0) {
7219                         /* can't find the item, must cow */
7220                         ret = 0;
7221                         goto out;
7222                 }
7223                 slot--;
7224         }
7225         ret = 0;
7226         leaf = path->nodes[0];
7227         btrfs_item_key_to_cpu(leaf, &key, slot);
7228         if (key.objectid != btrfs_ino(inode) ||
7229             key.type != BTRFS_EXTENT_DATA_KEY) {
7230                 /* not our file or wrong item type, must cow */
7231                 goto out;
7232         }
7233
7234         if (key.offset > offset) {
7235                 /* Wrong offset, must cow */
7236                 goto out;
7237         }
7238
7239         fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
7240         found_type = btrfs_file_extent_type(leaf, fi);
7241         if (found_type != BTRFS_FILE_EXTENT_REG &&
7242             found_type != BTRFS_FILE_EXTENT_PREALLOC) {
7243                 /* not a regular extent, must cow */
7244                 goto out;
7245         }
7246
7247         if (!nocow && found_type == BTRFS_FILE_EXTENT_REG)
7248                 goto out;
7249
7250         extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
7251         if (extent_end <= offset)
7252                 goto out;
7253
7254         disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
7255         if (disk_bytenr == 0)
7256                 goto out;
7257
7258         if (btrfs_file_extent_compression(leaf, fi) ||
7259             btrfs_file_extent_encryption(leaf, fi) ||
7260             btrfs_file_extent_other_encoding(leaf, fi))
7261                 goto out;
7262
7263         backref_offset = btrfs_file_extent_offset(leaf, fi);
7264
7265         if (orig_start) {
7266                 *orig_start = key.offset - backref_offset;
7267                 *orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
7268                 *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
7269         }
7270
7271         if (btrfs_extent_readonly(root, disk_bytenr))
7272                 goto out;
7273
7274         num_bytes = min(offset + *len, extent_end) - offset;
7275         if (!nocow && found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7276                 u64 range_end;
7277
7278                 range_end = round_up(offset + num_bytes, root->sectorsize) - 1;
7279                 ret = test_range_bit(io_tree, offset, range_end,
7280                                      EXTENT_DELALLOC, 0, NULL);
7281                 if (ret) {
7282                         ret = -EAGAIN;
7283                         goto out;
7284                 }
7285         }
7286
7287         btrfs_release_path(path);
7288
7289         /*
7290          * look for other files referencing this extent, if we
7291          * find any we must cow
7292          */
7293         trans = btrfs_join_transaction(root);
7294         if (IS_ERR(trans)) {
7295                 ret = 0;
7296                 goto out;
7297         }
7298
7299         ret = btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
7300                                     key.offset - backref_offset, disk_bytenr);
7301         btrfs_end_transaction(trans, root);
7302         if (ret) {
7303                 ret = 0;
7304                 goto out;
7305         }
7306
7307         /*
7308          * adjust disk_bytenr and num_bytes to cover just the bytes
7309          * in this extent we are about to write.  If there
7310          * are any csums in that range we have to cow in order
7311          * to keep the csums correct
7312          */
7313         disk_bytenr += backref_offset;
7314         disk_bytenr += offset - key.offset;
7315         if (csum_exist_in_range(root, disk_bytenr, num_bytes))
7316                 goto out;
7317         /*
7318          * all of the above have passed, it is safe to overwrite this extent
7319          * without cow
7320          */
7321         *len = num_bytes;
7322         ret = 1;
7323 out:
7324         btrfs_free_path(path);
7325         return ret;
7326 }
7327
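/*
 * Lockless check whether any page cache page exists in the byte range
 * [start, end] of this inode's mapping.
 */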
7328 bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
7329 {
7330         struct radix_tree_root *root = &inode->i_mapping->page_tree;
7331         int found = false;
7332         void **pagep = NULL;
7333         struct page *page = NULL;
7334         int start_idx;
7335         int end_idx;
7336
7337         start_idx = start >> PAGE_CACHE_SHIFT;
7338
7339         /*
7340          * end is the last byte in the last page.  end == start is legal
7341          */
7342         end_idx = end >> PAGE_CACHE_SHIFT;
7343
7344         rcu_read_lock();
7345
7346         /* Most of the code in this while loop is lifted from
7347          * find_get_page.  It's been modified to begin searching from a
7348          * page and return just the first page found in that range.  If the
7349          * found idx is less than or equal to the end idx then we know that
7350          * a page exists.  If no pages are found or if those pages are
7351          * outside of the range then we're fine (yay!) */
7352         while (page == NULL &&
7353                radix_tree_gang_lookup_slot(root, &pagep, NULL, start_idx, 1)) {
7354                 page = radix_tree_deref_slot(pagep);
7355                 if (unlikely(!page))
7356                         break;
7357
7358                 if (radix_tree_exception(page)) {
7359                         if (radix_tree_deref_retry(page)) {
7360                                 page = NULL;
7361                                 continue;
7362                         }
7363                         /*
7364                          * Otherwise, shmem/tmpfs must be storing a swap entry
7365                          * here as an exceptional entry: so return it without
7366                          * attempting to raise page count.
7367                          */
7368                         page = NULL;
7369                         break; /* TODO: Is this relevant for this use case? */
7370                 }
7371
7372                 if (!page_cache_get_speculative(page)) {
7373                         page = NULL;
7374                         continue;
7375                 }
7376
7377                 /*
7378                  * Has the page moved?
7379                  * This is part of the lockless pagecache protocol. See
7380                  * include/linux/pagemap.h for details.
7381                  */
7382                 if (unlikely(page != *pagep)) {
7383                         page_cache_release(page);
7384                         page = NULL;
7385                 }
7386         }
7387
7388         if (page) {
7389                 if (page->index <= end_idx)
7390                         found = true;
7391                 page_cache_release(page);
7392         }
7393
7394         rcu_read_unlock();
7395         return found;
7396 }
7397
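/*
 * Lock the extent range for direct I/O, waiting out ordered extents and
 * flushing/invalidating conflicting page cache pages first.  A non-zero
 * return means the caller has to fall back to buffered I/O.
 */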
7398 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
7399                               struct extent_state **cached_state, int writing)
7400 {
7401         struct btrfs_ordered_extent *ordered;
7402         int ret = 0;
7403
7404         while (1) {
7405                 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7406                                  0, cached_state);
7407                 /*
7408                  * We're concerned with the entire range that we're going to be
7409                  * doing DIO to, so we need to make sure there are no ordered
7410                  * extents in this range.
7411                  */
7412                 ordered = btrfs_lookup_ordered_range(inode, lockstart,
7413                                                      lockend - lockstart + 1);
7414
7415                 /*
7416                  * We need to make sure there are no buffered pages in this
7417                  * range either, we could have raced between the invalidate in
7418                  * generic_file_direct_write and locking the extent.  The
7419                  * invalidate needs to happen so that reads after a write do not
7420                  * get stale data.
7421                  */
7422                 if (!ordered &&
7423                     (!writing ||
7424                      !btrfs_page_exists_in_range(inode, lockstart, lockend)))
7425                         break;
7426
7427                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7428                                      cached_state, GFP_NOFS);
7429
7430                 if (ordered) {
7431                         btrfs_start_ordered_extent(inode, ordered, 1);
7432                         btrfs_put_ordered_extent(ordered);
7433                 } else {
7434                         /* Screw you mmap */
7435                         ret = btrfs_fdatawrite_range(inode, lockstart, lockend);
7436                         if (ret)
7437                                 break;
7438                         ret = filemap_fdatawait_range(inode->i_mapping,
7439                                                       lockstart,
7440                                                       lockend);
7441                         if (ret)
7442                                 break;
7443
7444                         /*
7445                          * If we found a page that couldn't be invalidated just
7446                          * fall back to buffered.
7447                          */
7448                         ret = invalidate_inode_pages2_range(inode->i_mapping,
7449                                         lockstart >> PAGE_CACHE_SHIFT,
7450                                         lockend >> PAGE_CACHE_SHIFT);
7451                         if (ret)
7452                                 break;
7453                 }
7454
7455                 cond_resched();
7456         }
7457
7458         return ret;
7459 }
7460
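/*
 * Build a pinned extent map for a newly allocated (or nocow) range, drop
 * any cached extent maps that overlap it and insert the new one.
 */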
7461 static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
7462                                            u64 len, u64 orig_start,
7463                                            u64 block_start, u64 block_len,
7464                                            u64 orig_block_len, u64 ram_bytes,
7465                                            int type)
7466 {
7467         struct extent_map_tree *em_tree;
7468         struct extent_map *em;
7469         struct btrfs_root *root = BTRFS_I(inode)->root;
7470         int ret;
7471
7472         em_tree = &BTRFS_I(inode)->extent_tree;
7473         em = alloc_extent_map();
7474         if (!em)
7475                 return ERR_PTR(-ENOMEM);
7476
7477         em->start = start;
7478         em->orig_start = orig_start;
7479         em->mod_start = start;
7480         em->mod_len = len;
7481         em->len = len;
7482         em->block_len = block_len;
7483         em->block_start = block_start;
7484         em->bdev = root->fs_info->fs_devices->latest_bdev;
7485         em->orig_block_len = orig_block_len;
7486         em->ram_bytes = ram_bytes;
7487         em->generation = -1;
7488         set_bit(EXTENT_FLAG_PINNED, &em->flags);
7489         if (type == BTRFS_ORDERED_PREALLOC)
7490                 set_bit(EXTENT_FLAG_FILLING, &em->flags);
7491
7492         do {
7493                 btrfs_drop_extent_cache(inode, em->start,
7494                                 em->start + em->len - 1, 0);
7495                 write_lock(&em_tree->lock);
7496                 ret = add_extent_mapping(em_tree, em, 1);
7497                 write_unlock(&em_tree->lock);
7498         } while (ret == -EEXIST);
7499
7500         if (ret) {
7501                 free_extent_map(em);
7502                 return ERR_PTR(ret);
7503         }
7504
7505         return em;
7506 }
7507
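/* Per-call DIO state handed to btrfs_get_blocks_direct() via current->journal_info */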
7508 struct btrfs_dio_data {
7509         u64 outstanding_extents;
7510         u64 reserve;
7511 };
7512
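/*
 * get_block callback for direct I/O: map [start, start + len) to an
 * existing extent when nocow/prealloc allows it, otherwise allocate a new
 * extent, and fill in bh_result for the generic DIO code.
 */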
7513 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
7514                                    struct buffer_head *bh_result, int create)
7515 {
7516         struct extent_map *em;
7517         struct btrfs_root *root = BTRFS_I(inode)->root;
7518         struct extent_state *cached_state = NULL;
7519         struct btrfs_dio_data *dio_data = NULL;
7520         u64 start = iblock << inode->i_blkbits;
7521         u64 lockstart, lockend;
7522         u64 len = bh_result->b_size;
7523         int unlock_bits = EXTENT_LOCKED;
7524         int ret = 0;
7525
7526         if (create)
7527                 unlock_bits |= EXTENT_DIRTY;
7528         else
7529                 len = min_t(u64, len, root->sectorsize);
7530
7531         lockstart = start;
7532         lockend = start + len - 1;
7533
7534         if (current->journal_info) {
7535                 /*
7536                  * Need to pull our outstanding extents and set journal_info to NULL so
7537                  * that anything that needs to check if there's a transaction doesn't get
7538                  * confused.
7539                  */
7540                 dio_data = current->journal_info;
7541                 current->journal_info = NULL;
7542         }
7543
7544         /*
7545          * If this errors out it's because we couldn't invalidate pagecache for
7546          * this range and we need to fall back to buffered I/O.
7547          */
7548         if (lock_extent_direct(inode, lockstart, lockend, &cached_state, create))
7549                 return -ENOTBLK;
7550
7551         em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
7552         if (IS_ERR(em)) {
7553                 ret = PTR_ERR(em);
7554                 goto unlock_err;
7555         }
7556
7557         /*
7558          * Ok, for INLINE and COMPRESSED extents we need to fall back to buffered
7559          * io.  INLINE is special, and we could probably kludge it in here, but
7560          * it's still buffered so for safety let's just fall back to the generic
7561          * buffered path.
7562          *
7563          * For COMPRESSED we _have_ to read the entire extent in so we can
7564          * decompress it, so there will be buffering required no matter what we
7565          * do, so go ahead and fallback to buffered.
7566          *
7567          * We return -ENOTBLK because that's what makes DIO go ahead and go back
7568          * to buffered IO.  Don't blame me, this is the price we pay for using
7569          * the generic code.
7570          */
7571         if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
7572             em->block_start == EXTENT_MAP_INLINE) {
7573                 free_extent_map(em);
7574                 ret = -ENOTBLK;
7575                 goto unlock_err;
7576         }
7577
7578         /* Just a good old fashioned hole, return */
7579         if (!create && (em->block_start == EXTENT_MAP_HOLE ||
7580                         test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
7581                 free_extent_map(em);
7582                 goto unlock_err;
7583         }
7584
7585         /*
7586          * We don't allocate a new extent in the following cases
7587          *
7588          * 1) The inode is marked as NODATACOW.  In this case we'll just use the
7589          * existing extent.
7590          * 2) The extent is marked as PREALLOC.  We're good to go here and can
7591          * just use the extent.
7592          *
7593          */
7594         if (!create) {
7595                 len = min(len, em->len - (start - em->start));
7596                 lockstart = start + len;
7597                 goto unlock;
7598         }
7599
7600         if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
7601             ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
7602              em->block_start != EXTENT_MAP_HOLE)) {
7603                 int type;
7604                 u64 block_start, orig_start, orig_block_len, ram_bytes;
7605
7606                 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7607                         type = BTRFS_ORDERED_PREALLOC;
7608                 else
7609                         type = BTRFS_ORDERED_NOCOW;
7610                 len = min(len, em->len - (start - em->start));
7611                 block_start = em->block_start + (start - em->start);
7612
7613                 if (can_nocow_extent(inode, start, &len, &orig_start,
7614                                      &orig_block_len, &ram_bytes) == 1) {
7615                         if (type == BTRFS_ORDERED_PREALLOC) {
7616                                 free_extent_map(em);
7617                                 em = create_pinned_em(inode, start, len,
7618                                                        orig_start,
7619                                                        block_start, len,
7620                                                        orig_block_len,
7621                                                        ram_bytes, type);
7622                                 if (IS_ERR(em)) {
7623                                         ret = PTR_ERR(em);
7624                                         goto unlock_err;
7625                                 }
7626                         }
7627
7628                         ret = btrfs_add_ordered_extent_dio(inode, start,
7629                                            block_start, len, len, type);
7630                         if (ret) {
7631                                 free_extent_map(em);
7632                                 goto unlock_err;
7633                         }
7634                         goto unlock;
7635                 }
7636         }
7637
7638         /*
7639          * this will cow the extent, reset the len in case we changed
7640          * it above
7641          */
7642         len = bh_result->b_size;
7643         free_extent_map(em);
7644         em = btrfs_new_extent_direct(inode, start, len);
7645         if (IS_ERR(em)) {
7646                 ret = PTR_ERR(em);
7647                 goto unlock_err;
7648         }
7649         len = min(len, em->len - (start - em->start));
7650 unlock:
7651         bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
7652                 inode->i_blkbits;
7653         bh_result->b_size = len;
7654         bh_result->b_bdev = em->bdev;
7655         set_buffer_mapped(bh_result);
7656         if (create) {
7657                 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7658                         set_buffer_new(bh_result);
7659
7660                 /*
7661                  * Need to update the i_size under the extent lock so buffered
7662                  * readers will get the updated i_size when we unlock.
7663                  */
7664                 if (start + len > i_size_read(inode))
7665                         i_size_write(inode, start + len);
7666
7667                 /*
7668                  * If we have an outstanding_extents count still set then we're
7669                  * within our reservation, otherwise we need to adjust our inode
7670                  * counter appropriately.
7671                  */
7672                 if (dio_data->outstanding_extents) {
7673                         (dio_data->outstanding_extents)--;
7674                 } else {
7675                         spin_lock(&BTRFS_I(inode)->lock);
7676                         BTRFS_I(inode)->outstanding_extents++;
7677                         spin_unlock(&BTRFS_I(inode)->lock);
7678                 }
7679
7680                 btrfs_free_reserved_data_space(inode, start, len);
7681                 WARN_ON(dio_data->reserve < len);
7682                 dio_data->reserve -= len;
7683                 current->journal_info = dio_data;
7684         }
7685
7686         /*
7687          * In the case of write we need to clear and unlock the entire range,
7688          * in the case of read we need to unlock only the end area that we
7689          * aren't using if there is any left over space.
7690          */
7691         if (lockstart < lockend) {
7692                 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
7693                                  lockend, unlock_bits, 1, 0,
7694                                  &cached_state, GFP_NOFS);
7695         } else {
7696                 free_extent_state(cached_state);
7697         }
7698
7699         free_extent_map(em);
7700
7701         return 0;
7702
7703 unlock_err:
7704         clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7705                          unlock_bits, 1, 0, &cached_state, GFP_NOFS);
7706         if (dio_data)
7707                 current->journal_info = dio_data;
7708         return ret;
7709 }
7710
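/* Submit a repair read bio to another mirror for a failed DIO read */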
7711 static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio,
7712                                         int rw, int mirror_num)
7713 {
7714         struct btrfs_root *root = BTRFS_I(inode)->root;
7715         int ret;
7716
7717         BUG_ON(rw & REQ_WRITE);
7718
7719         bio_get(bio);
7720
7721         ret = btrfs_bio_wq_end_io(root->fs_info, bio,
7722                                   BTRFS_WQ_ENDIO_DIO_REPAIR);
7723         if (ret)
7724                 goto err;
7725
7726         ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
7727 err:
7728         bio_put(bio);
7729         return ret;
7730 }
7731
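/*
 * Decide whether a failed DIO read can be retried from another mirror
 * and, if so, advance failrec->this_mirror to the next one to try.
 */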
7732 static int btrfs_check_dio_repairable(struct inode *inode,
7733                                       struct bio *failed_bio,
7734                                       struct io_failure_record *failrec,
7735                                       int failed_mirror)
7736 {
7737         int num_copies;
7738
7739         num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
7740                                       failrec->logical, failrec->len);
7741         if (num_copies == 1) {
7742                 /*
7743                  * we only have a single copy of the data, so don't bother with
7744                  * all the retry and error correction code that follows. no
7745                  * matter what the error is, it is very likely to persist.
7746                  */
7747                 pr_debug("Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
7748                          num_copies, failrec->this_mirror, failed_mirror);
7749                 return 0;
7750         }
7751
7752         failrec->failed_mirror = failed_mirror;
7753         failrec->this_mirror++;
7754         if (failrec->this_mirror == failed_mirror)
7755                 failrec->this_mirror++;
7756
7757         if (failrec->this_mirror > num_copies) {
7758                 pr_debug("Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
7759                          num_copies, failrec->this_mirror, failed_mirror);
7760                 return 0;
7761         }
7762
7763         return 1;
7764 }
7765
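/*
 * Set up an io_failure_record for the failed page range and submit a
 * repair read to the next mirror.
 */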
7766 static int dio_read_error(struct inode *inode, struct bio *failed_bio,
7767                           struct page *page, u64 start, u64 end,
7768                           int failed_mirror, bio_end_io_t *repair_endio,
7769                           void *repair_arg)
7770 {
7771         struct io_failure_record *failrec;
7772         struct bio *bio;
7773         int isector;
7774         int read_mode;
7775         int ret;
7776
7777         BUG_ON(failed_bio->bi_rw & REQ_WRITE);
7778
7779         ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
7780         if (ret)
7781                 return ret;
7782
7783         ret = btrfs_check_dio_repairable(inode, failed_bio, failrec,
7784                                          failed_mirror);
7785         if (!ret) {
7786                 free_io_failure(inode, failrec);
7787                 return -EIO;
7788         }
7789
7790         if (failed_bio->bi_vcnt > 1)
7791                 read_mode = READ_SYNC | REQ_FAILFAST_DEV;
7792         else
7793                 read_mode = READ_SYNC;
7794
7795         isector = start - btrfs_io_bio(failed_bio)->logical;
7796         isector >>= inode->i_sb->s_blocksize_bits;
7797         bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
7798                                       0, isector, repair_endio, repair_arg);
7799         if (!bio) {
7800                 free_io_failure(inode, failrec);
7801                 return -EIO;
7802         }
7803
7804         btrfs_debug(BTRFS_I(inode)->root->fs_info,
7805                     "Repair DIO Read Error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d\n",
7806                     read_mode, failrec->this_mirror, failrec->in_validation);
7807
7808         ret = submit_dio_repair_bio(inode, bio, read_mode,
7809                                     failrec->this_mirror);
7810         if (ret) {
7811                 free_io_failure(inode, failrec);
7812                 bio_put(bio);
7813         }
7814
7815         return ret;
7816 }
7817
7818 struct btrfs_retry_complete {
7819         struct completion done;
7820         struct inode *inode;
7821         u64 start;
7822         int uptodate;
7823 };
7824
7825 static void btrfs_retry_endio_nocsum(struct bio *bio)
7826 {
7827         struct btrfs_retry_complete *done = bio->bi_private;
7828         struct bio_vec *bvec;
7829         int i;
7830
7831         if (bio->bi_error)
7832                 goto end;
7833
7834         done->uptodate = 1;
7835         bio_for_each_segment_all(bvec, bio, i)
7836                 clean_io_failure(done->inode, done->start, bvec->bv_page, 0);
7837 end:
7838         complete(&done->done);
7839         bio_put(bio);
7840 }
7841
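/*
 * For inodes without checksums: retry each segment of the failed bio
 * against the remaining mirrors, returning an error once no mirror can
 * provide the data.
 */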
7842 static int __btrfs_correct_data_nocsum(struct inode *inode,
7843                                        struct btrfs_io_bio *io_bio)
7844 {
7845         struct bio_vec *bvec;
7846         struct btrfs_retry_complete done;
7847         u64 start;
7848         int i;
7849         int ret;
7850
7851         start = io_bio->logical;
7852         done.inode = inode;
7853
7854         bio_for_each_segment_all(bvec, &io_bio->bio, i) {
7855 try_again:
7856                 done.uptodate = 0;
7857                 done.start = start;
7858                 init_completion(&done.done);
7859
7860                 ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start,
7861                                      start + bvec->bv_len - 1,
7862                                      io_bio->mirror_num,
7863                                      btrfs_retry_endio_nocsum, &done);
7864                 if (ret)
7865                         return ret;
7866
7867                 wait_for_completion(&done.done);
7868
7869                 if (!done.uptodate) {
7870                         /* We might have another mirror, so try again */
7871                         goto try_again;
7872                 }
7873
7874                 start += bvec->bv_len;
7875         }
7876
7877         return 0;
7878 }
7879
7880 static void btrfs_retry_endio(struct bio *bio)
7881 {
7882         struct btrfs_retry_complete *done = bio->bi_private;
7883         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
7884         struct bio_vec *bvec;
7885         int uptodate;
7886         int ret;
7887         int i;
7888
7889         if (bio->bi_error)
7890                 goto end;
7891
7892         uptodate = 1;
7893         bio_for_each_segment_all(bvec, bio, i) {
7894                 ret = __readpage_endio_check(done->inode, io_bio, i,
7895                                              bvec->bv_page, 0,
7896                                              done->start, bvec->bv_len);
7897                 if (!ret)
7898                         clean_io_failure(done->inode, done->start,
7899                                          bvec->bv_page, 0);
7900                 else
7901                         uptodate = 0;
7902         }
7903
7904         done->uptodate = uptodate;
7905 end:
7906         complete(&done->done);
7907         bio_put(bio);
7908 }
7909
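/*
 * Verify the checksum of every segment in the bio and submit repair reads
 * for the segments that fail verification.
 */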
7910 static int __btrfs_subio_endio_read(struct inode *inode,
7911                                     struct btrfs_io_bio *io_bio, int err)
7912 {
7913         struct bio_vec *bvec;
7914         struct btrfs_retry_complete done;
7915         u64 start;
7916         u64 offset = 0;
7917         int i;
7918         int ret;
7919
7920         err = 0;
7921         start = io_bio->logical;
7922         done.inode = inode;
7923
7924         bio_for_each_segment_all(bvec, &io_bio->bio, i) {
7925                 ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page,
7926                                              0, start, bvec->bv_len);
7927                 if (likely(!ret))
7928                         goto next;
7929 try_again:
7930                 done.uptodate = 0;
7931                 done.start = start;
7932                 init_completion(&done.done);
7933
7934                 ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start,
7935                                      start + bvec->bv_len - 1,
7936                                      io_bio->mirror_num,
7937                                      btrfs_retry_endio, &done);
7938                 if (ret) {
7939                         err = ret;
7940                         goto next;
7941                 }
7942
7943                 wait_for_completion(&done.done);
7944
7945                 if (!done.uptodate) {
7946                         /* We might have another mirror, so try again */
7947                         goto try_again;
7948                 }
7949 next:
7950                 offset += bvec->bv_len;
7951                 start += bvec->bv_len;
7952         }
7953
7954         return err;
7955 }
7956
7957 static int btrfs_subio_endio_read(struct inode *inode,
7958                                   struct btrfs_io_bio *io_bio, int err)
7959 {
7960         bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
7961
7962         if (skip_csum) {
7963                 if (unlikely(err))
7964                         return __btrfs_correct_data_nocsum(inode, io_bio);
7965                 else
7966                         return 0;
7967         } else {
7968                 return __btrfs_subio_endio_read(inode, io_bio, err);
7969         }
7970 }
7971
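/*
 * End-io handler for direct reads: run csum verification/repair, unlock
 * the extent range and complete the original DIO bio.
 */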
7972 static void btrfs_endio_direct_read(struct bio *bio)
7973 {
7974         struct btrfs_dio_private *dip = bio->bi_private;
7975         struct inode *inode = dip->inode;
7976         struct bio *dio_bio;
7977         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
7978         int err = bio->bi_error;
7979
7980         if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED)
7981                 err = btrfs_subio_endio_read(inode, io_bio, err);
7982
7983         unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
7984                       dip->logical_offset + dip->bytes - 1);
7985         dio_bio = dip->dio_bio;
7986
7987         kfree(dip);
7988
7989         dio_end_io(dio_bio, bio->bi_error);
7990
7991         if (io_bio->end_io)
7992                 io_bio->end_io(io_bio, err);
7993         bio_put(bio);
7994 }
7995
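/*
 * End-io handler for direct writes: finish the ordered extents covered by
 * this bio and complete the original DIO bio.
 */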
7996 static void btrfs_endio_direct_write(struct bio *bio)
7997 {
7998         struct btrfs_dio_private *dip = bio->bi_private;
7999         struct inode *inode = dip->inode;
8000         struct btrfs_root *root = BTRFS_I(inode)->root;
8001         struct btrfs_ordered_extent *ordered = NULL;
8002         u64 ordered_offset = dip->logical_offset;
8003         u64 ordered_bytes = dip->bytes;
8004         struct bio *dio_bio;
8005         int ret;
8006
8007 again:
8008         ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
8009                                                    &ordered_offset,
8010                                                    ordered_bytes,
8011                                                    !bio->bi_error);
8012         if (!ret)
8013                 goto out_test;
8014
8015         btrfs_init_work(&ordered->work, btrfs_endio_write_helper,
8016                         finish_ordered_fn, NULL, NULL);
8017         btrfs_queue_work(root->fs_info->endio_write_workers,
8018                          &ordered->work);
8019 out_test:
8020         /*
8021          * our bio might span multiple ordered extents.  If we haven't
8022          * completed the accounting for the whole dio, go back and try again
8023          */
8024         if (ordered_offset < dip->logical_offset + dip->bytes) {
8025                 ordered_bytes = dip->logical_offset + dip->bytes -
8026                         ordered_offset;
8027                 ordered = NULL;
8028                 goto again;
8029         }
8030         dio_bio = dip->dio_bio;
8031
8032         kfree(dip);
8033
8034         dio_end_io(dio_bio, bio->bi_error);
8035         bio_put(bio);
8036 }
8037
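/* Async submit hook for DIO writes: csum the bio data before it is written out */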
8038 static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
8039                                     struct bio *bio, int mirror_num,
8040                                     unsigned long bio_flags, u64 offset)
8041 {
8042         int ret;
8043         struct btrfs_root *root = BTRFS_I(inode)->root;
8044         ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
8045         BUG_ON(ret); /* -ENOMEM */
8046         return 0;
8047 }
8048
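/*
 * End-io for each split bio of a DIO submission: record any error and,
 * once the last pending bio completes, end the original bio.
 */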
8049 static void btrfs_end_dio_bio(struct bio *bio)
8050 {
8051         struct btrfs_dio_private *dip = bio->bi_private;
8052         int err = bio->bi_error;
8053
8054         if (err)
8055                 btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
8056                            "direct IO failed ino %llu rw %lu sector %#Lx len %u err no %d",
8057                            btrfs_ino(dip->inode), bio->bi_rw,
8058                            (unsigned long long)bio->bi_iter.bi_sector,
8059                            bio->bi_iter.bi_size, err);
8060
8061         if (dip->subio_endio)
8062                 err = dip->subio_endio(dip->inode, btrfs_io_bio(bio), err);
8063
8064         if (err) {
8065                 dip->errors = 1;
8066
8067                 /*
8068                  * before the atomic variable goes to zero, we must make sure
8069                  * dip->errors is perceived to be set.
8070                  */
8071                 smp_mb__before_atomic();
8072         }
8073
8074         /* if there are more bios still pending for this dio, just exit */
8075         if (!atomic_dec_and_test(&dip->pending_bios))
8076                 goto out;
8077
8078         if (dip->errors) {
8079                 bio_io_error(dip->orig_bio);
8080         } else {
8081                 dip->dio_bio->bi_error = 0;
8082                 bio_endio(dip->orig_bio);
8083         }
8084 out:
8085         bio_put(bio);
8086 }
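
/*
 * Illustrative sketch, not part of fs/btrfs/inode.c: the completion pattern
 * used by btrfs_end_dio_bio() above, where many sub-bios share one parent
 * request and only the completion that drops the pending count to zero may
 * finish (or fail) the parent.  Plain user-space C11 with hypothetical names;
 * the kernel uses atomic_dec_and_test() plus smp_mb__before_atomic() instead
 * of the C11 atomics shown here.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct example_dip {
	atomic_int pending;		/* sub-requests still in flight, set before submission */
	atomic_bool errors;		/* set by any failed sub-request */
};

/* called once per completed sub-request; err != 0 means it failed */
static void example_end_sub_request(struct example_dip *dip, int err)
{
	if (err)
		atomic_store(&dip->errors, true);	/* published before the decrement */

	/* only the last completion finishes the whole request */
	if (atomic_fetch_sub(&dip->pending, 1) != 1)
		return;

	if (atomic_load(&dip->errors))
		fprintf(stderr, "parent request failed\n");
	else
		printf("parent request completed\n");
}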
8087
8088 static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
8089                                        u64 first_sector, gfp_t gfp_flags)
8090 {
8091         struct bio *bio;
8092         bio = btrfs_bio_alloc(bdev, first_sector, BIO_MAX_PAGES, gfp_flags);
8093         if (bio)
8094                 bio_associate_current(bio);
8095         return bio;
8096 }
8097
8098 static inline int btrfs_lookup_and_bind_dio_csum(struct btrfs_root *root,
8099                                                  struct inode *inode,
8100                                                  struct btrfs_dio_private *dip,
8101                                                  struct bio *bio,
8102                                                  u64 file_offset)
8103 {
8104         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
8105         struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio);
8106         int ret;
8107
8108         /*
8109          * We load all the csum data we need when we submit
8110          * the first bio to reduce the csum tree search and
8111          * contention.
8112          */
8113         if (dip->logical_offset == file_offset) {
8114                 ret = btrfs_lookup_bio_sums_dio(root, inode, dip->orig_bio,
8115                                                 file_offset);
8116                 if (ret)
8117                         return ret;
8118         }
8119
8120         if (bio == dip->orig_bio)
8121                 return 0;
8122
8123         file_offset -= dip->logical_offset;
8124         file_offset >>= inode->i_sb->s_blocksize_bits;
8125         io_bio->csum = (u8 *)(((u32 *)orig_io_bio->csum) + file_offset);
8126
8127         return 0;
8128 }
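
/*
 * Illustrative sketch, not part of fs/btrfs/inode.c: the csum pointer
 * arithmetic in btrfs_lookup_and_bind_dio_csum() above.  The checksums for
 * the whole original bio are looked up once into a single array of 32-bit
 * csums, one per block; a later sub-bio simply points into that array at the
 * index of its first block.  Standalone helper with hypothetical names.
 */
#include <stdint.h>

static const uint32_t *example_sub_bio_csums(const uint32_t *orig_csums,
					     uint64_t dio_logical_offset,
					     uint64_t sub_file_offset,
					     unsigned int blocksize_bits)
{
	/* number of blocks between the start of the dio and this sub-bio */
	uint64_t block_index = (sub_file_offset - dio_logical_offset)
					>> blocksize_bits;

	return orig_csums + block_index;
}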
8129
8130 static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
8131                                          int rw, u64 file_offset, int skip_sum,
8132                                          int async_submit)
8133 {
8134         struct btrfs_dio_private *dip = bio->bi_private;
8135         int write = rw & REQ_WRITE;
8136         struct btrfs_root *root = BTRFS_I(inode)->root;
8137         int ret;
8138
8139         if (async_submit)
8140                 async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
8141
8142         bio_get(bio);
8143
8144         if (!write) {
8145                 ret = btrfs_bio_wq_end_io(root->fs_info, bio,
8146                                 BTRFS_WQ_ENDIO_DATA);
8147                 if (ret)
8148                         goto err;
8149         }
8150
8151         if (skip_sum)
8152                 goto map;
8153
8154         if (write && async_submit) {
8155                 ret = btrfs_wq_submit_bio(root->fs_info,
8156                                    inode, rw, bio, 0, 0,
8157                                    file_offset,
8158                                    __btrfs_submit_bio_start_direct_io,
8159                                    __btrfs_submit_bio_done);
8160                 goto err;
8161         } else if (write) {
8162                 /*
8163                  * If we aren't doing async submit, calculate the csum of the
8164                  * bio now.
8165                  */
8166                 ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
8167                 if (ret)
8168                         goto err;
8169         } else {
8170                 ret = btrfs_lookup_and_bind_dio_csum(root, inode, dip, bio,
8171                                                      file_offset);
8172                 if (ret)
8173                         goto err;
8174         }
8175 map:
8176         ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
8177 err:
8178         bio_put(bio);
8179         return ret;
8180 }
8181
8182 static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
8183                                     int skip_sum)
8184 {
8185         struct inode *inode = dip->inode;
8186         struct btrfs_root *root = BTRFS_I(inode)->root;
8187         struct bio *bio;
8188         struct bio *orig_bio = dip->orig_bio;
8189         struct bio_vec *bvec = orig_bio->bi_io_vec;
8190         u64 start_sector = orig_bio->bi_iter.bi_sector;
8191         u64 file_offset = dip->logical_offset;
8192         u64 submit_len = 0;
8193         u64 map_length;
8194         int nr_pages = 0;
8195         int ret;
8196         int async_submit = 0;
8197
8198         map_length = orig_bio->bi_iter.bi_size;
8199         ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
8200                               &map_length, NULL, 0);
8201         if (ret)
8202                 return -EIO;
8203
8204         if (map_length >= orig_bio->bi_iter.bi_size) {
8205                 bio = orig_bio;
8206                 dip->flags |= BTRFS_DIO_ORIG_BIO_SUBMITTED;
8207                 goto submit;
8208         }
8209
8210         /* async crcs make it difficult to collect full stripe writes. */
8211         if (btrfs_get_alloc_profile(root, 1) & BTRFS_BLOCK_GROUP_RAID56_MASK)
8212                 async_submit = 0;
8213         else
8214                 async_submit = 1;
8215
8216         bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
8217         if (!bio)
8218                 return -ENOMEM;
8219
8220         bio->bi_private = dip;
8221         bio->bi_end_io = btrfs_end_dio_bio;
8222         btrfs_io_bio(bio)->logical = file_offset;
8223         atomic_inc(&dip->pending_bios);
8224
8225         while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
8226                 if (map_length < submit_len + bvec->bv_len ||
8227                     bio_add_page(bio, bvec->bv_page, bvec->bv_len,
8228                                  bvec->bv_offset) < bvec->bv_len) {
8229                         /*
8230                          * inc the count before we submit the bio so
8231                          * the end IO handler can't see it hit zero while
8232                          * we're still setting up; otherwise the dip might
8233                          * get freed before we're done with it
8234                          */
8235                         atomic_inc(&dip->pending_bios);
8236                         ret = __btrfs_submit_dio_bio(bio, inode, rw,
8237                                                      file_offset, skip_sum,
8238                                                      async_submit);
8239                         if (ret) {
8240                                 bio_put(bio);
8241                                 atomic_dec(&dip->pending_bios);
8242                                 goto out_err;
8243                         }
8244
8245                         start_sector += submit_len >> 9;
8246                         file_offset += submit_len;
8247
8248                         submit_len = 0;
8249                         nr_pages = 0;
8250
8251                         bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
8252                                                   start_sector, GFP_NOFS);
8253                         if (!bio)
8254                                 goto out_err;
8255                         bio->bi_private = dip;
8256                         bio->bi_end_io = btrfs_end_dio_bio;
8257                         btrfs_io_bio(bio)->logical = file_offset;
8258
8259                         map_length = orig_bio->bi_iter.bi_size;
8260                         ret = btrfs_map_block(root->fs_info, rw,
8261                                               start_sector << 9,
8262                                               &map_length, NULL, 0);
8263                         if (ret) {
8264                                 bio_put(bio);
8265                                 goto out_err;
8266                         }
8267                 } else {
8268                         submit_len += bvec->bv_len;
8269                         nr_pages++;
8270                         bvec++;
8271                 }
8272         }
8273
8274 submit:
8275         ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
8276                                      async_submit);
8277         if (!ret)
8278                 return 0;
8279
8280         bio_put(bio);
8281 out_err:
8282         dip->errors = 1;
8283         /*
8284          * before the atomic variable goes to zero, we must
8285          * make sure dip->errors is perceived to be set.
8286          */
8287         smp_mb__before_atomic();
8288         if (atomic_dec_and_test(&dip->pending_bios))
8289                 bio_io_error(dip->orig_bio);
8290
8291         /* bio_end_io() will handle error, so we needn't return it */
8292         return 0;
8293 }
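
/*
 * Illustrative sketch, not part of fs/btrfs/inode.c: the splitting idea behind
 * btrfs_submit_direct_hook() above.  A direct I/O can cross a chunk/stripe
 * boundary, so it is submitted in pieces, each no larger than what the block
 * mapping says is contiguous from the current position.  The stripe size and
 * helper names below are hypothetical stand-ins for btrfs_map_block() and the
 * real submit path.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_STRIPE_SIZE	(64ULL * 1024)	/* assumed 64K for the demo */

/* stand-in for btrfs_map_block(): bytes that map contiguously from 'start' */
static uint64_t example_map_length(uint64_t start)
{
	return EXAMPLE_STRIPE_SIZE - (start & (EXAMPLE_STRIPE_SIZE - 1));
}

static void example_submit_split(uint64_t start, uint64_t total)
{
	while (total) {
		uint64_t len = example_map_length(start);

		if (len > total)
			len = total;
		printf("submit %llu bytes at %llu\n",
		       (unsigned long long)len, (unsigned long long)start);
		start += len;
		total -= len;
	}
}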
8294
8295 static void btrfs_submit_direct(int rw, struct bio *dio_bio,
8296                                 struct inode *inode, loff_t file_offset)
8297 {
8298         struct btrfs_dio_private *dip = NULL;
8299         struct bio *io_bio = NULL;
8300         struct btrfs_io_bio *btrfs_bio;
8301         int skip_sum;
8302         int write = rw & REQ_WRITE;
8303         int ret = 0;
8304
8305         skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
8306
8307         io_bio = btrfs_bio_clone(dio_bio, GFP_NOFS);
8308         if (!io_bio) {
8309                 ret = -ENOMEM;
8310                 goto free_ordered;
8311         }
8312
8313         dip = kzalloc(sizeof(*dip), GFP_NOFS);
8314         if (!dip) {
8315                 ret = -ENOMEM;
8316                 goto free_ordered;
8317         }
8318
8319         dip->private = dio_bio->bi_private;
8320         dip->inode = inode;
8321         dip->logical_offset = file_offset;
8322         dip->bytes = dio_bio->bi_iter.bi_size;
8323         dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9;
8324         io_bio->bi_private = dip;
8325         dip->orig_bio = io_bio;
8326         dip->dio_bio = dio_bio;
8327         atomic_set(&dip->pending_bios, 0);
8328         btrfs_bio = btrfs_io_bio(io_bio);
8329         btrfs_bio->logical = file_offset;
8330
8331         if (write) {
8332                 io_bio->bi_end_io = btrfs_endio_direct_write;
8333         } else {
8334                 io_bio->bi_end_io = btrfs_endio_direct_read;
8335                 dip->subio_endio = btrfs_subio_endio_read;
8336         }
8337
8338         ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
8339         if (!ret)
8340                 return;
8341
8342         if (btrfs_bio->end_io)
8343                 btrfs_bio->end_io(btrfs_bio, ret);
8344
8345 free_ordered:
8346         /*
8347          * If we arrived here it means either we failed to submit the dip,
8348          * failed to clone the dio_bio, or failed to allocate the
8349          * dip. If we cloned the dio_bio and allocated the dip, we can just
8350          * call bio_endio against our io_bio so that we get proper resource
8351          * cleanup if we fail to submit the dip, otherwise, we must do the
8352          * same as btrfs_endio_direct_[write|read] because we can't call these
8353          * callbacks - they require an allocated dip and a clone of dio_bio.
8354          */
8355         if (io_bio && dip) {
8356                 io_bio->bi_error = -EIO;
8357                 bio_endio(io_bio);
8358                 /*
8359                  * The end io callbacks free our dip, do the final put on io_bio
8360                  * and all the cleanup and final put for dio_bio (through
8361                  * dio_end_io()).
8362                  */
8363                 dip = NULL;
8364                 io_bio = NULL;
8365         } else {
8366                 if (write) {
8367                         struct btrfs_ordered_extent *ordered;
8368
8369                         ordered = btrfs_lookup_ordered_extent(inode,
8370                                                               file_offset);
8371                         set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
8372                         /*
8373                          * Decrements our ref on the ordered extent and removes
8374                          * the ordered extent from the inode's ordered tree,
8375                          * doing all the proper resource cleanup such as for the
8376                          * reserved space and waking up any waiters for this
8377                          * ordered extent (through btrfs_remove_ordered_extent).
8378                          */
8379                         btrfs_finish_ordered_io(ordered);
8380                 } else {
8381                         unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
8382                               file_offset + dio_bio->bi_iter.bi_size - 1);
8383                 }
8384                 dio_bio->bi_error = -EIO;
8385                 /*
8386                  * Releases and cleans up our dio_bio, no need to bio_put()
8387                  * nor bio_endio()/bio_io_error() against dio_bio.
8388                  */
8389                 dio_end_io(dio_bio, ret);
8390         }
8391         if (io_bio)
8392                 bio_put(io_bio);
8393         kfree(dip);
8394 }
8395
8396 static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb,
8397                         const struct iov_iter *iter, loff_t offset)
8398 {
8399         int seg;
8400         int i;
8401         unsigned blocksize_mask = root->sectorsize - 1;
8402         ssize_t retval = -EINVAL;
8403
8404         if (offset & blocksize_mask)
8405                 goto out;
8406
8407         if (iov_iter_alignment(iter) & blocksize_mask)
8408                 goto out;
8409
8410         /* If this is a write we don't need to check anymore */
8411         if (iov_iter_rw(iter) == WRITE)
8412                 return 0;
8413         /*
8414          * Check to make sure we don't have duplicate iov_base's in this
8415          * iovec, if so return EINVAL, otherwise we'll get csum errors
8416          * when reading back.
8417          */
8418         for (seg = 0; seg < iter->nr_segs; seg++) {
8419                 for (i = seg + 1; i < iter->nr_segs; i++) {
8420                         if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
8421                                 goto out;
8422                 }
8423         }
8424         retval = 0;
8425 out:
8426         return retval;
8427 }
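
/*
 * Illustrative sketch, not part of fs/btrfs/inode.c: the alignment test used
 * by check_direct_IO() above.  Because the sector size is a power of two,
 * "value & (sectorsize - 1)" equals "value % sectorsize" and is non-zero
 * exactly when the value is misaligned.  The kernel applies the mask to the
 * file offset and to the iovec alignment via iov_iter_alignment(); the helper
 * below is a hypothetical standalone version.
 */
#include <stdbool.h>
#include <stdint.h>

static bool example_dio_aligned(uint64_t offset, uint64_t iov_alignment,
				uint32_t sectorsize)
{
	uint32_t mask = sectorsize - 1;	/* sectorsize must be a power of two */

	return ((offset | iov_alignment) & mask) == 0;
}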
8428
8429 static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
8430                                loff_t offset)
8431 {
8432         struct file *file = iocb->ki_filp;
8433         struct inode *inode = file->f_mapping->host;
8434         struct btrfs_root *root = BTRFS_I(inode)->root;
8435         struct btrfs_dio_data dio_data = { 0 };
8436         size_t count = 0;
8437         int flags = 0;
8438         bool wakeup = true;
8439         bool relock = false;
8440         ssize_t ret;
8441
8442         if (check_direct_IO(BTRFS_I(inode)->root, iocb, iter, offset))
8443                 return 0;
8444
8445         inode_dio_begin(inode);
8446         smp_mb__after_atomic();
8447
8448         /*
8449          * The generic stuff only does filemap_write_and_wait_range, which
8450          * isn't enough if we've written compressed pages to this area, so
8451          * we need to flush the dirty pages again to make absolutely sure
8452          * that any outstanding dirty pages are on disk.
8453          */
8454         count = iov_iter_count(iter);
8455         if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
8456                      &BTRFS_I(inode)->runtime_flags))
8457                 filemap_fdatawrite_range(inode->i_mapping, offset,
8458                                          offset + count - 1);
8459
8460         if (iov_iter_rw(iter) == WRITE) {
8461                 /*
8462                  * If the write DIO is beyond the EOF, we need to update
8463                  * the isize, but it is protected by i_mutex, so we
8464                  * cannot unlock the i_mutex in this case.
8465                  */
8466                 if (offset + count <= inode->i_size) {
8467                         mutex_unlock(&inode->i_mutex);
8468                         relock = true;
8469                 }
8470                 ret = btrfs_delalloc_reserve_space(inode, offset, count);
8471                 if (ret)
8472                         goto out;
8473                 dio_data.outstanding_extents = div64_u64(count +
8474                                                 BTRFS_MAX_EXTENT_SIZE - 1,
8475                                                 BTRFS_MAX_EXTENT_SIZE);
8476
8477                 /*
8478                  * We need to know how many extents we reserved so that we can
8479                  * do the accounting properly if we go over the number we
8480                  * originally calculated.  Abuse current->journal_info for this.
8481                  */
8482                 dio_data.reserve = round_up(count, root->sectorsize);
8483                 current->journal_info = &dio_data;
8484         } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
8485                                      &BTRFS_I(inode)->runtime_flags)) {
8486                 inode_dio_end(inode);
8487                 flags = DIO_LOCKING | DIO_SKIP_HOLES;
8488                 wakeup = false;
8489         }
8490
8491         ret = __blockdev_direct_IO(iocb, inode,
8492                                    BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
8493                                    iter, offset, btrfs_get_blocks_direct, NULL,
8494                                    btrfs_submit_direct, flags);
8495         if (iov_iter_rw(iter) == WRITE) {
8496                 current->journal_info = NULL;
8497                 if (ret < 0 && ret != -EIOCBQUEUED) {
8498                         if (dio_data.reserve)
8499                                 btrfs_delalloc_release_space(inode, offset,
8500                                                              dio_data.reserve);
8501                 } else if (ret >= 0 && (size_t)ret < count)
8502                         btrfs_delalloc_release_space(inode, offset,
8503                                                      count - (size_t)ret);
8504         }
8505 out:
8506         if (wakeup)
8507                 inode_dio_end(inode);
8508         if (relock)
8509                 mutex_lock(&inode->i_mutex);
8510
8511         return ret;
8512 }
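
/*
 * Illustrative sketch, not part of fs/btrfs/inode.c: the extent-count estimate
 * made by btrfs_direct_IO() above before a DIO write.  The reservation needs
 * one accounting slot per maximum-sized extent the write could become, which
 * is a ceiling division of the byte count.  The 128M value is an assumption
 * for the example; the kernel uses BTRFS_MAX_EXTENT_SIZE.
 */
#include <stdint.h>

#define EXAMPLE_MAX_EXTENT_SIZE	(128ULL * 1024 * 1024)

static uint64_t example_outstanding_extents(uint64_t count)
{
	return (count + EXAMPLE_MAX_EXTENT_SIZE - 1) / EXAMPLE_MAX_EXTENT_SIZE;
}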
8513
8514 #define BTRFS_FIEMAP_FLAGS      (FIEMAP_FLAG_SYNC)
8515
8516 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
8517                 __u64 start, __u64 len)
8518 {
8519         int     ret;
8520
8521         ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS);
8522         if (ret)
8523                 return ret;
8524
8525         return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
8526 }
8527
8528 int btrfs_readpage(struct file *file, struct page *page)
8529 {
8530         struct extent_io_tree *tree;
8531         tree = &BTRFS_I(page->mapping->host)->io_tree;
8532         return extent_read_full_page(tree, page, btrfs_get_extent, 0);
8533 }
8534
8535 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
8536 {
8537         struct extent_io_tree *tree;
8538
8539
8540         if (current->flags & PF_MEMALLOC) {
8541                 redirty_page_for_writepage(wbc, page);
8542                 unlock_page(page);
8543                 return 0;
8544         }
8545         tree = &BTRFS_I(page->mapping->host)->io_tree;
8546         return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
8547 }
8548
8549 static int btrfs_writepages(struct address_space *mapping,
8550                             struct writeback_control *wbc)
8551 {
8552         struct extent_io_tree *tree;
8553
8554         tree = &BTRFS_I(mapping->host)->io_tree;
8555         return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
8556 }
8557
8558 static int
8559 btrfs_readpages(struct file *file, struct address_space *mapping,
8560                 struct list_head *pages, unsigned nr_pages)
8561 {
8562         struct extent_io_tree *tree;
8563         tree = &BTRFS_I(mapping->host)->io_tree;
8564         return extent_readpages(tree, mapping, pages, nr_pages,
8565                                 btrfs_get_extent);
8566 }
8567 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8568 {
8569         struct extent_io_tree *tree;
8570         struct extent_map_tree *map;
8571         int ret;
8572
8573         tree = &BTRFS_I(page->mapping->host)->io_tree;
8574         map = &BTRFS_I(page->mapping->host)->extent_tree;
8575         ret = try_release_extent_mapping(map, tree, page, gfp_flags);
8576         if (ret == 1) {
8577                 ClearPagePrivate(page);
8578                 set_page_private(page, 0);
8579                 page_cache_release(page);
8580         }
8581         return ret;
8582 }
8583
8584 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8585 {
8586         if (PageWriteback(page) || PageDirty(page))
8587                 return 0;
8588         return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
8589 }
8590
8591 static void btrfs_invalidatepage(struct page *page, unsigned int offset,
8592                                  unsigned int length)
8593 {
8594         struct inode *inode = page->mapping->host;
8595         struct extent_io_tree *tree;
8596         struct btrfs_ordered_extent *ordered;
8597         struct extent_state *cached_state = NULL;
8598         u64 page_start = page_offset(page);
8599         u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
8600         int inode_evicting = inode->i_state & I_FREEING;
8601
8602         /*
8603          * we have the page locked, so new writeback can't start,
8604          * and the dirty bit won't be cleared while we are here.
8605          *
8606          * Wait for IO on this page so that we can safely clear
8607          * the PagePrivate2 bit and do ordered accounting
8608          */
8609         wait_on_page_writeback(page);
8610
8611         tree = &BTRFS_I(inode)->io_tree;
8612         if (offset) {
8613                 btrfs_releasepage(page, GFP_NOFS);
8614                 return;
8615         }
8616
8617         if (!inode_evicting)
8618                 lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
8619         ordered = btrfs_lookup_ordered_extent(inode, page_start);
8620         if (ordered) {
8621                 /*
8622                  * IO on this page will never be started, so we need
8623                  * to account for any ordered extents now
8624                  */
8625                 if (!inode_evicting)
8626                         clear_extent_bit(tree, page_start, page_end,
8627                                          EXTENT_DIRTY | EXTENT_DELALLOC |
8628                                          EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
8629                                          EXTENT_DEFRAG, 1, 0, &cached_state,
8630                                          GFP_NOFS);
8631                 /*
8632                  * whoever cleared the private bit is responsible
8633                  * for the finish_ordered_io
8634                  */
8635                 if (TestClearPagePrivate2(page)) {
8636                         struct btrfs_ordered_inode_tree *tree;
8637                         u64 new_len;
8638
8639                         tree = &BTRFS_I(inode)->ordered_tree;
8640
8641                         spin_lock_irq(&tree->lock);
8642                         set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
8643                         new_len = page_start - ordered->file_offset;
8644                         if (new_len < ordered->truncated_len)
8645                                 ordered->truncated_len = new_len;
8646                         spin_unlock_irq(&tree->lock);
8647
8648                         if (btrfs_dec_test_ordered_pending(inode, &ordered,
8649                                                            page_start,
8650                                                            PAGE_CACHE_SIZE, 1))
8651                                 btrfs_finish_ordered_io(ordered);
8652                 }
8653                 btrfs_put_ordered_extent(ordered);
8654                 if (!inode_evicting) {
8655                         cached_state = NULL;
8656                         lock_extent_bits(tree, page_start, page_end, 0,
8657                                          &cached_state);
8658                 }
8659         }
8660
8661         /*
8662          * Qgroup reserved space handler
8663          * Page here will be either
8664          * 1) Already written to disk
8665          *    In this case, its reserved space is released from data rsv map
8666          *    and will be freed by delayed_ref handler finally.
8667          *    So even if we call qgroup_free_data(), it won't decrease the
8668          *    reserved space.
8669          * 2) Not written to disk
8670          *    This means the reserved space should be freed here.
8671          */
8672         btrfs_qgroup_free_data(inode, page_start, PAGE_CACHE_SIZE);
8673         if (!inode_evicting) {
8674                 clear_extent_bit(tree, page_start, page_end,
8675                                  EXTENT_LOCKED | EXTENT_DIRTY |
8676                                  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
8677                                  EXTENT_DEFRAG, 1, 1,
8678                                  &cached_state, GFP_NOFS);
8679
8680                 __btrfs_releasepage(page, GFP_NOFS);
8681         }
8682
8683         ClearPageChecked(page);
8684         if (PagePrivate(page)) {
8685                 ClearPagePrivate(page);
8686                 set_page_private(page, 0);
8687                 page_cache_release(page);
8688         }
8689 }
8690
8691 /*
8692  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
8693  * called from a page fault handler when a page is first dirtied. Hence we must
8694  * be careful to check for EOF conditions here. We set the page up correctly
8695  * for a written page which means we get ENOSPC checking when writing into
8696  * holes and correct delalloc and unwritten extent mapping on filesystems that
8697  * support these features.
8698  *
8699  * We are not allowed to take the i_mutex here so we have to play games to
8700  * protect against truncate races as the page could now be beyond EOF.  Because
8701  * vmtruncate() writes the inode size before removing pages, once we have the
8702  * page lock we can determine safely if the page is beyond EOF. If it is not
8703  * beyond EOF, then the page is guaranteed safe against truncation until we
8704  * unlock the page.
8705  */
8706 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
8707 {
8708         struct page *page = vmf->page;
8709         struct inode *inode = file_inode(vma->vm_file);
8710         struct btrfs_root *root = BTRFS_I(inode)->root;
8711         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
8712         struct btrfs_ordered_extent *ordered;
8713         struct extent_state *cached_state = NULL;
8714         char *kaddr;
8715         unsigned long zero_start;
8716         loff_t size;
8717         int ret;
8718         int reserved = 0;
8719         u64 page_start;
8720         u64 page_end;
8721
8722         sb_start_pagefault(inode->i_sb);
8723         page_start = page_offset(page);
8724         page_end = page_start + PAGE_CACHE_SIZE - 1;
8725
8726         ret = btrfs_delalloc_reserve_space(inode, page_start,
8727                                            PAGE_CACHE_SIZE);
8728         if (!ret) {
8729                 ret = file_update_time(vma->vm_file);
8730                 reserved = 1;
8731         }
8732         if (ret) {
8733                 if (ret == -ENOMEM)
8734                         ret = VM_FAULT_OOM;
8735                 else /* -ENOSPC, -EIO, etc */
8736                         ret = VM_FAULT_SIGBUS;
8737                 if (reserved)
8738                         goto out;
8739                 goto out_noreserve;
8740         }
8741
8742         ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
8743 again:
8744         lock_page(page);
8745         size = i_size_read(inode);
8746
8747         if ((page->mapping != inode->i_mapping) ||
8748             (page_start >= size)) {
8749                 /* page got truncated out from underneath us */
8750                 goto out_unlock;
8751         }
8752         wait_on_page_writeback(page);
8753
8754         lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
8755         set_page_extent_mapped(page);
8756
8757         /*
8758          * we can't set the delalloc bits if there are pending ordered
8759          * extents.  Drop our locks and wait for them to finish
8760          */
8761         ordered = btrfs_lookup_ordered_extent(inode, page_start);
8762         if (ordered) {
8763                 unlock_extent_cached(io_tree, page_start, page_end,
8764                                      &cached_state, GFP_NOFS);
8765                 unlock_page(page);
8766                 btrfs_start_ordered_extent(inode, ordered, 1);
8767                 btrfs_put_ordered_extent(ordered);
8768                 goto again;
8769         }
8770
8771         /*
8772          * XXX - page_mkwrite gets called every time the page is dirtied, even
8773          * if it was already dirty, so for space accounting reasons we need to
8774          * clear any delalloc bits for the range we are fixing to save.  There
8775          * is probably a better way to do this, but for now keep consistent with
8776          * prepare_pages in the normal write path.
8777          */
8778         clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
8779                           EXTENT_DIRTY | EXTENT_DELALLOC |
8780                           EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
8781                           0, 0, &cached_state, GFP_NOFS);
8782
8783         ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
8784                                         &cached_state);
8785         if (ret) {
8786                 unlock_extent_cached(io_tree, page_start, page_end,
8787                                      &cached_state, GFP_NOFS);
8788                 ret = VM_FAULT_SIGBUS;
8789                 goto out_unlock;
8790         }
8791         ret = 0;
8792
8793         /* page is wholly or partially inside EOF */
8794         if (page_start + PAGE_CACHE_SIZE > size)
8795                 zero_start = size & ~PAGE_CACHE_MASK;
8796         else
8797                 zero_start = PAGE_CACHE_SIZE;
8798
8799         if (zero_start != PAGE_CACHE_SIZE) {
8800                 kaddr = kmap(page);
8801                 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
8802                 flush_dcache_page(page);
8803                 kunmap(page);
8804         }
8805         ClearPageChecked(page);
8806         set_page_dirty(page);
8807         SetPageUptodate(page);
8808
8809         BTRFS_I(inode)->last_trans = root->fs_info->generation;
8810         BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
8811         BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
8812
8813         unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
8814
8815 out_unlock:
8816         if (!ret) {
8817                 sb_end_pagefault(inode->i_sb);
8818                 return VM_FAULT_LOCKED;
8819         }
8820         unlock_page(page);
8821 out:
8822         btrfs_delalloc_release_space(inode, page_start, PAGE_CACHE_SIZE);
8823 out_noreserve:
8824         sb_end_pagefault(inode->i_sb);
8825         return ret;
8826 }
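
/*
 * Illustrative sketch, not part of fs/btrfs/inode.c: how btrfs_page_mkwrite()
 * above decides which bytes of the faulted page to zero.  If the page
 * straddles i_size, only the tail past EOF is cleared; a page entirely inside
 * EOF is left alone (pages entirely past EOF were already rejected earlier).
 * Plain user-space C assuming a hypothetical 4096-byte page.
 */
#include <stdint.h>
#include <string.h>

#define EXAMPLE_PAGE_SIZE	4096UL

static void example_zero_eof_tail(char *page, uint64_t page_start,
				  uint64_t i_size)
{
	unsigned long zero_start;

	if (page_start + EXAMPLE_PAGE_SIZE > i_size)
		zero_start = i_size & (EXAMPLE_PAGE_SIZE - 1);	/* EOF offset in page */
	else
		zero_start = EXAMPLE_PAGE_SIZE;			/* page wholly before EOF */

	if (zero_start != EXAMPLE_PAGE_SIZE)
		memset(page + zero_start, 0, EXAMPLE_PAGE_SIZE - zero_start);
}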
8827
8828 static int btrfs_truncate(struct inode *inode)
8829 {
8830         struct btrfs_root *root = BTRFS_I(inode)->root;
8831         struct btrfs_block_rsv *rsv;
8832         int ret = 0;
8833         int err = 0;
8834         struct btrfs_trans_handle *trans;
8835         u64 mask = root->sectorsize - 1;
8836         u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
8837
8838         ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
8839                                        (u64)-1);
8840         if (ret)
8841                 return ret;
8842
8843         /*
8844          * Yes ladies and gentlemen, this is indeed ugly.  The fact is we have
8845          * 3 things going on here
8846          *
8847          * 1) We need to reserve space for our orphan item and the space to
8848          * delete our orphan item.  Lord knows we don't want to have a dangling
8849          * orphan item because we didn't reserve space to remove it.
8850          *
8851          * 2) We need to reserve space to update our inode.
8852          *
8853          * 3) We need to have something to cache all the space that is going to
8854          * be free'd up by the truncate operation, but also have some slack
8855          * space reserved in case it uses space during the truncate (thank you
8856          * very much snapshotting).
8857          *
8858          * And we need these to all be separate.  The fact is we can use a lot of
8859          * space doing the truncate, and we have no earthly idea how much space
8860          * we will use, so we need the truncate reservation to be separate so it
8861          * doesn't end up using space reserved for updating the inode or
8862          * removing the orphan item.  We also need to be able to stop the
8863          * transaction and start a new one, which means we need to be able to
8864          * update the inode several times, and we have no way of knowing how
8865          * many times that will be, so we can't just reserve 1 item for the
8866          * entirety of the operation, so that has to be done separately as well.
8867          * Then there is the orphan item, which does indeed need to be held on
8868          * to for the whole operation, and we need nobody to touch this reserved
8869          * space except the orphan code.
8870          *
8871          * So that leaves us with
8872          *
8873          * 1) root->orphan_block_rsv - for the orphan deletion.
8874          * 2) rsv - for the truncate reservation, which we will steal from the
8875          * transaction reservation.
8876          * 3) fs_info->trans_block_rsv - this will have 1 item's worth left for
8877          * updating the inode.
8878          */
8879         rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
8880         if (!rsv)
8881                 return -ENOMEM;
8882         rsv->size = min_size;
8883         rsv->failfast = 1;
8884
8885         /*
8886          * 1 for the truncate slack space
8887          * 1 for updating the inode.
8888          */
8889         trans = btrfs_start_transaction(root, 2);
8890         if (IS_ERR(trans)) {
8891                 err = PTR_ERR(trans);
8892                 goto out;
8893         }
8894
8895         /* Migrate the slack space for the truncate to our reserve */
8896         ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
8897                                       min_size);
8898         BUG_ON(ret);
8899
8900         /*
8901          * So if we truncate and then write and fsync we normally would just
8902          * write the extents that changed, which is a problem if we need to
8903          * first truncate that entire inode.  So set this flag so we write out
8904          * all of the extents in the inode to the sync log so we're completely
8905          * safe.
8906          */
8907         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
8908         trans->block_rsv = rsv;
8909
8910         while (1) {
8911                 ret = btrfs_truncate_inode_items(trans, root, inode,
8912                                                  inode->i_size,
8913                                                  BTRFS_EXTENT_DATA_KEY);
8914                 if (ret != -ENOSPC && ret != -EAGAIN) {
8915                         err = ret;
8916                         break;
8917                 }
8918
8919                 trans->block_rsv = &root->fs_info->trans_block_rsv;
8920                 ret = btrfs_update_inode(trans, root, inode);
8921                 if (ret) {
8922                         err = ret;
8923                         break;
8924                 }
8925
8926                 btrfs_end_transaction(trans, root);
8927                 btrfs_btree_balance_dirty(root);
8928
8929                 trans = btrfs_start_transaction(root, 2);
8930                 if (IS_ERR(trans)) {
8931                         ret = err = PTR_ERR(trans);
8932                         trans = NULL;
8933                         break;
8934                 }
8935
8936                 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
8937                                               rsv, min_size);
8938                 BUG_ON(ret);    /* shouldn't happen */
8939                 trans->block_rsv = rsv;
8940         }
8941
8942         if (ret == 0 && inode->i_nlink > 0) {
8943                 trans->block_rsv = root->orphan_block_rsv;
8944                 ret = btrfs_orphan_del(trans, inode);
8945                 if (ret)
8946                         err = ret;
8947         }
8948
8949         if (trans) {
8950                 trans->block_rsv = &root->fs_info->trans_block_rsv;
8951                 ret = btrfs_update_inode(trans, root, inode);
8952                 if (ret && !err)
8953                         err = ret;
8954
8955                 ret = btrfs_end_transaction(trans, root);
8956                 btrfs_btree_balance_dirty(root);
8957         }
8958
8959 out:
8960         btrfs_free_block_rsv(root, rsv);
8961
8962         if (ret && !err)
8963                 err = ret;
8964
8965         return err;
8966 }
8967
8968 /*
8969  * create a new subvolume directory/inode (helper for the ioctl).
8970  */
8971 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
8972                              struct btrfs_root *new_root,
8973                              struct btrfs_root *parent_root,
8974                              u64 new_dirid)
8975 {
8976         struct inode *inode;
8977         int err;
8978         u64 index = 0;
8979
8980         inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
8981                                 new_dirid, new_dirid,
8982                                 S_IFDIR | (~current_umask() & S_IRWXUGO),
8983                                 &index);
8984         if (IS_ERR(inode))
8985                 return PTR_ERR(inode);
8986         inode->i_op = &btrfs_dir_inode_operations;
8987         inode->i_fop = &btrfs_dir_file_operations;
8988
8989         set_nlink(inode, 1);
8990         btrfs_i_size_write(inode, 0);
8991         unlock_new_inode(inode);
8992
8993         err = btrfs_subvol_inherit_props(trans, new_root, parent_root);
8994         if (err)
8995                 btrfs_err(new_root->fs_info,
8996                           "error inheriting subvolume %llu properties: %d",
8997                           new_root->root_key.objectid, err);
8998
8999         err = btrfs_update_inode(trans, new_root, inode);
9000
9001         iput(inode);
9002         return err;
9003 }
9004
9005 struct inode *btrfs_alloc_inode(struct super_block *sb)
9006 {
9007         struct btrfs_inode *ei;
9008         struct inode *inode;
9009
9010         ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
9011         if (!ei)
9012                 return NULL;
9013
9014         ei->root = NULL;
9015         ei->generation = 0;
9016         ei->last_trans = 0;
9017         ei->last_sub_trans = 0;
9018         ei->logged_trans = 0;
9019         ei->delalloc_bytes = 0;
9020         ei->defrag_bytes = 0;
9021         ei->disk_i_size = 0;
9022         ei->flags = 0;
9023         ei->csum_bytes = 0;
9024         ei->index_cnt = (u64)-1;
9025         ei->dir_index = 0;
9026         ei->last_unlink_trans = 0;
9027         ei->last_log_commit = 0;
9028
9029         spin_lock_init(&ei->lock);
9030         ei->outstanding_extents = 0;
9031         ei->reserved_extents = 0;
9032
9033         ei->runtime_flags = 0;
9034         ei->force_compress = BTRFS_COMPRESS_NONE;
9035
9036         ei->delayed_node = NULL;
9037
9038         ei->i_otime.tv_sec = 0;
9039         ei->i_otime.tv_nsec = 0;
9040
9041         inode = &ei->vfs_inode;
9042         extent_map_tree_init(&ei->extent_tree);
9043         extent_io_tree_init(&ei->io_tree, &inode->i_data);
9044         extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
9045         ei->io_tree.track_uptodate = 1;
9046         ei->io_failure_tree.track_uptodate = 1;
9047         atomic_set(&ei->sync_writers, 0);
9048         mutex_init(&ei->log_mutex);
9049         mutex_init(&ei->delalloc_mutex);
9050         btrfs_ordered_inode_tree_init(&ei->ordered_tree);
9051         INIT_LIST_HEAD(&ei->delalloc_inodes);
9052         RB_CLEAR_NODE(&ei->rb_node);
9053
9054         return inode;
9055 }
9056
9057 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
9058 void btrfs_test_destroy_inode(struct inode *inode)
9059 {
9060         btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
9061         kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
9062 }
9063 #endif
9064
9065 static void btrfs_i_callback(struct rcu_head *head)
9066 {
9067         struct inode *inode = container_of(head, struct inode, i_rcu);
9068         kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
9069 }
9070
9071 void btrfs_destroy_inode(struct inode *inode)
9072 {
9073         struct btrfs_ordered_extent *ordered;
9074         struct btrfs_root *root = BTRFS_I(inode)->root;
9075
9076         WARN_ON(!hlist_empty(&inode->i_dentry));
9077         WARN_ON(inode->i_data.nrpages);
9078         WARN_ON(BTRFS_I(inode)->outstanding_extents);
9079         WARN_ON(BTRFS_I(inode)->reserved_extents);
9080         WARN_ON(BTRFS_I(inode)->delalloc_bytes);
9081         WARN_ON(BTRFS_I(inode)->csum_bytes);
9082         WARN_ON(BTRFS_I(inode)->defrag_bytes);
9083
9084         /*
9085          * This can happen when we create an inode, but somebody else also
9086          * created the same inode and we need to destroy the one we already
9087          * created.
9088          */
9089         if (!root)
9090                 goto free;
9091
9092         if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
9093                      &BTRFS_I(inode)->runtime_flags)) {
9094                 btrfs_info(root->fs_info, "inode %llu still on the orphan list",
9095                         btrfs_ino(inode));
9096                 atomic_dec(&root->orphan_inodes);
9097         }
9098
9099         while (1) {
9100                 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
9101                 if (!ordered)
9102                         break;
9103                 else {
9104                         btrfs_err(root->fs_info, "found ordered extent %llu %llu on inode cleanup",
9105                                 ordered->file_offset, ordered->len);
9106                         btrfs_remove_ordered_extent(inode, ordered);
9107                         btrfs_put_ordered_extent(ordered);
9108                         btrfs_put_ordered_extent(ordered);
9109                 }
9110         }
9111         btrfs_qgroup_check_reserved_leak(inode);
9112         inode_tree_del(inode);
9113         btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
9114 free:
9115         call_rcu(&inode->i_rcu, btrfs_i_callback);
9116 }
9117
9118 int btrfs_drop_inode(struct inode *inode)
9119 {
9120         struct btrfs_root *root = BTRFS_I(inode)->root;
9121
9122         if (root == NULL)
9123                 return 1;
9124
9125         /* the snap/subvol tree is being deleted */
9126         if (btrfs_root_refs(&root->root_item) == 0)
9127                 return 1;
9128         else
9129                 return generic_drop_inode(inode);
9130 }
9131
9132 static void init_once(void *foo)
9133 {
9134         struct btrfs_inode *ei = (struct btrfs_inode *) foo;
9135
9136         inode_init_once(&ei->vfs_inode);
9137 }
9138
9139 void btrfs_destroy_cachep(void)
9140 {
9141         /*
9142          * Make sure all delayed rcu free inodes are flushed before we
9143          * destroy cache.
9144          */
9145         rcu_barrier();
9146         if (btrfs_inode_cachep)
9147                 kmem_cache_destroy(btrfs_inode_cachep);
9148         if (btrfs_trans_handle_cachep)
9149                 kmem_cache_destroy(btrfs_trans_handle_cachep);
9150         if (btrfs_transaction_cachep)
9151                 kmem_cache_destroy(btrfs_transaction_cachep);
9152         if (btrfs_path_cachep)
9153                 kmem_cache_destroy(btrfs_path_cachep);
9154         if (btrfs_free_space_cachep)
9155                 kmem_cache_destroy(btrfs_free_space_cachep);
9156         if (btrfs_delalloc_work_cachep)
9157                 kmem_cache_destroy(btrfs_delalloc_work_cachep);
9158 }
9159
9160 int btrfs_init_cachep(void)
9161 {
9162         btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
9163                         sizeof(struct btrfs_inode), 0,
9164                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
9165         if (!btrfs_inode_cachep)
9166                 goto fail;
9167
9168         btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
9169                         sizeof(struct btrfs_trans_handle), 0,
9170                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
9171         if (!btrfs_trans_handle_cachep)
9172                 goto fail;
9173
9174         btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction",
9175                         sizeof(struct btrfs_transaction), 0,
9176                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
9177         if (!btrfs_transaction_cachep)
9178                 goto fail;
9179
9180         btrfs_path_cachep = kmem_cache_create("btrfs_path",
9181                         sizeof(struct btrfs_path), 0,
9182                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
9183         if (!btrfs_path_cachep)
9184                 goto fail;
9185
9186         btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
9187                         sizeof(struct btrfs_free_space), 0,
9188                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
9189         if (!btrfs_free_space_cachep)
9190                 goto fail;
9191
9192         btrfs_delalloc_work_cachep = kmem_cache_create("btrfs_delalloc_work",
9193                         sizeof(struct btrfs_delalloc_work), 0,
9194                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
9195                         NULL);
9196         if (!btrfs_delalloc_work_cachep)
9197                 goto fail;
9198
9199         return 0;
9200 fail:
9201         btrfs_destroy_cachep();
9202         return -ENOMEM;
9203 }
9204
9205 static int btrfs_getattr(struct vfsmount *mnt,
9206                          struct dentry *dentry, struct kstat *stat)
9207 {
9208         u64 delalloc_bytes;
9209         struct inode *inode = d_inode(dentry);
9210         u32 blocksize = inode->i_sb->s_blocksize;
9211
9212         generic_fillattr(inode, stat);
9213         stat->dev = BTRFS_I(inode)->root->anon_dev;
9214         stat->blksize = PAGE_CACHE_SIZE;
9215
9216         spin_lock(&BTRFS_I(inode)->lock);
9217         delalloc_bytes = BTRFS_I(inode)->delalloc_bytes;
9218         spin_unlock(&BTRFS_I(inode)->lock);
9219         stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
9220                         ALIGN(delalloc_bytes, blocksize)) >> 9;
9221         return 0;
9222 }
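
/*
 * Illustrative sketch, not part of fs/btrfs/inode.c: the block count reported
 * by btrfs_getattr() above.  Bytes already on disk and dirty delalloc bytes
 * are each rounded up to the filesystem block size, then the sum is converted
 * to 512-byte stat blocks.  Standalone helpers with hypothetical names;
 * blocksize must be a power of two.
 */
#include <stdint.h>

static uint64_t example_align_up(uint64_t x, uint32_t a)
{
	return (x + a - 1) & ~(uint64_t)(a - 1);
}

static uint64_t example_stat_blocks(uint64_t disk_bytes,
				    uint64_t delalloc_bytes,
				    uint32_t blocksize)
{
	return (example_align_up(disk_bytes, blocksize) +
		example_align_up(delalloc_bytes, blocksize)) >> 9;
}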
9223
9224 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
9225                            struct inode *new_dir, struct dentry *new_dentry)
9226 {
9227         struct btrfs_trans_handle *trans;
9228         struct btrfs_root *root = BTRFS_I(old_dir)->root;
9229         struct btrfs_root *dest = BTRFS_I(new_dir)->root;
9230         struct inode *new_inode = d_inode(new_dentry);
9231         struct inode *old_inode = d_inode(old_dentry);
9232         struct timespec ctime = CURRENT_TIME;
9233         u64 index = 0;
9234         u64 root_objectid;
9235         int ret;
9236         u64 old_ino = btrfs_ino(old_inode);
9237
9238         if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
9239                 return -EPERM;
9240
9241         /* we only allow rename subvolume link between subvolumes */
9242         if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
9243                 return -EXDEV;
9244
9245         if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
9246             (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
9247                 return -ENOTEMPTY;
9248
9249         if (S_ISDIR(old_inode->i_mode) && new_inode &&
9250             new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
9251                 return -ENOTEMPTY;
9252
9253
9254         /* check for collisions, even if the name isn't there */
9255         ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
9256                              new_dentry->d_name.name,
9257                              new_dentry->d_name.len);
9258
9259         if (ret) {
9260                 if (ret == -EEXIST) {
9261                         /* we shouldn't get
9262                          * eexist without a new_inode */
9263                         if (WARN_ON(!new_inode)) {
9264                                 return ret;
9265                         }
9266                 } else {
9267                         /* maybe -EOVERFLOW */
9268                         return ret;
9269                 }
9270         }
9271         ret = 0;
9272
9273         /*
9274          * we're using rename to replace one file with another.  Start IO on it
9275          * now so  we don't add too much work to the end of the transaction
9276          */
9277         if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
9278                 filemap_flush(old_inode->i_mapping);
9279
9280         /* close the racy window with snapshot create/destroy ioctl */
9281         if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9282                 down_read(&root->fs_info->subvol_sem);
9283         /*
9284          * We want to reserve the absolute worst case amount of items.  So if
9285          * both inodes are subvols and we need to unlink them then that would
9286          * require 4 item modifications, but if they are both normal inodes it
9287          * would require 5 item modifications, so we'll assume they're normal
9288          * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
9289          * should cover the worst case number of items we'll modify.
9290          */
9291         trans = btrfs_start_transaction(root, 11);
9292         if (IS_ERR(trans)) {
9293                 ret = PTR_ERR(trans);
9294                 goto out_notrans;
9295         }
9296
9297         if (dest != root)
9298                 btrfs_record_root_in_trans(trans, dest);
9299
9300         ret = btrfs_set_inode_index(new_dir, &index);
9301         if (ret)
9302                 goto out_fail;
9303
9304         BTRFS_I(old_inode)->dir_index = 0ULL;
9305         if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9306                 /* force full log commit if subvolume involved. */
9307                 btrfs_set_log_full_commit(root->fs_info, trans);
9308         } else {
9309                 ret = btrfs_insert_inode_ref(trans, dest,
9310                                              new_dentry->d_name.name,
9311                                              new_dentry->d_name.len,
9312                                              old_ino,
9313                                              btrfs_ino(new_dir), index);
9314                 if (ret)
9315                         goto out_fail;
9316                 /*
9317                  * this is an ugly little race, but the rename is required
9318                  * to make sure that if we crash, the inode is either at the
9319                  * old name or the new one.  pinning the log transaction lets
9320                  * us make sure we don't allow a log commit to come in after
9321                  * we unlink the name but before we add the new name back in.
9322                  */
9323                 btrfs_pin_log_trans(root);
9324         }
9325
9326         inode_inc_iversion(old_dir);
9327         inode_inc_iversion(new_dir);
9328         inode_inc_iversion(old_inode);
9329         old_dir->i_ctime = old_dir->i_mtime = ctime;
9330         new_dir->i_ctime = new_dir->i_mtime = ctime;
9331         old_inode->i_ctime = ctime;
9332
9333         if (old_dentry->d_parent != new_dentry->d_parent)
9334                 btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
9335
9336         if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9337                 root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
9338                 ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
9339                                         old_dentry->d_name.name,
9340                                         old_dentry->d_name.len);
9341         } else {
9342                 ret = __btrfs_unlink_inode(trans, root, old_dir,
9343                                         d_inode(old_dentry),
9344                                         old_dentry->d_name.name,
9345                                         old_dentry->d_name.len);
9346                 if (!ret)
9347                         ret = btrfs_update_inode(trans, root, old_inode);
9348         }
9349         if (ret) {
9350                 btrfs_abort_transaction(trans, root, ret);
9351                 goto out_fail;
9352         }
9353
9354         if (new_inode) {
9355                 inode_inc_iversion(new_inode);
9356                 new_inode->i_ctime = CURRENT_TIME;
9357                 if (unlikely(btrfs_ino(new_inode) ==
9358                              BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
9359                         root_objectid = BTRFS_I(new_inode)->location.objectid;
9360                         ret = btrfs_unlink_subvol(trans, dest, new_dir,
9361                                                 root_objectid,
9362                                                 new_dentry->d_name.name,
9363                                                 new_dentry->d_name.len);
9364                         BUG_ON(new_inode->i_nlink == 0);
9365                 } else {
9366                         ret = btrfs_unlink_inode(trans, dest, new_dir,
9367                                                  d_inode(new_dentry),
9368                                                  new_dentry->d_name.name,
9369                                                  new_dentry->d_name.len);
9370                 }
9371                 if (!ret && new_inode->i_nlink == 0)
9372                         ret = btrfs_orphan_add(trans, d_inode(new_dentry));
9373                 if (ret) {
9374                         btrfs_abort_transaction(trans, root, ret);
9375                         goto out_fail;
9376                 }
9377         }
9378
9379         ret = btrfs_add_link(trans, new_dir, old_inode,
9380                              new_dentry->d_name.name,
9381                              new_dentry->d_name.len, 0, index);
9382         if (ret) {
9383                 btrfs_abort_transaction(trans, root, ret);
9384                 goto out_fail;
9385         }
9386
9387         if (old_inode->i_nlink == 1)
9388                 BTRFS_I(old_inode)->dir_index = index;
9389
9390         if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
9391                 struct dentry *parent = new_dentry->d_parent;
9392                 btrfs_log_new_name(trans, old_inode, old_dir, parent);
9393                 btrfs_end_log_trans(root);
9394         }
9395 out_fail:
9396         btrfs_end_transaction(trans, root);
9397 out_notrans:
9398         if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9399                 up_read(&root->fs_info->subvol_sem);
9400
9401         return ret;
9402 }
9403
9404 static int btrfs_rename2(struct inode *old_dir, struct dentry *old_dentry,
9405                          struct inode *new_dir, struct dentry *new_dentry,
9406                          unsigned int flags)
9407 {
9408         if (flags & ~RENAME_NOREPLACE)
9409                 return -EINVAL;
9410
9411         return btrfs_rename(old_dir, old_dentry, new_dir, new_dentry);
9412 }
9413
9414 static void btrfs_run_delalloc_work(struct btrfs_work *work)
9415 {
9416         struct btrfs_delalloc_work *delalloc_work;
9417         struct inode *inode;
9418
9419         delalloc_work = container_of(work, struct btrfs_delalloc_work,
9420                                      work);
9421         inode = delalloc_work->inode;
9422         if (delalloc_work->wait) {
9423                 btrfs_wait_ordered_range(inode, 0, (u64)-1);
9424         } else {
9425                 filemap_flush(inode->i_mapping);
9426                 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
9427                              &BTRFS_I(inode)->runtime_flags))
9428                         filemap_flush(inode->i_mapping);
9429         }
9430
9431         if (delalloc_work->delay_iput)
9432                 btrfs_add_delayed_iput(inode);
9433         else
9434                 iput(inode);
9435         complete(&delalloc_work->completion);
9436 }
9437
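/*
 * Allocate and initialize a delalloc work item for @inode.  The caller
 * queues it on fs_info->flush_workers and later pairs it with
 * btrfs_wait_and_free_delalloc_work().  Returns NULL if the slab
 * allocation fails.
 */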
9438 struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
9439                                                     int wait, int delay_iput)
9440 {
9441         struct btrfs_delalloc_work *work;
9442
9443         work = kmem_cache_zalloc(btrfs_delalloc_work_cachep, GFP_NOFS);
9444         if (!work)
9445                 return NULL;
9446
9447         init_completion(&work->completion);
9448         INIT_LIST_HEAD(&work->list);
9449         work->inode = inode;
9450         work->wait = wait;
9451         work->delay_iput = delay_iput;
9452         WARN_ON_ONCE(!inode);
9453         btrfs_init_work(&work->work, btrfs_flush_delalloc_helper,
9454                         btrfs_run_delalloc_work, NULL, NULL);
9455
9456         return work;
9457 }
9458
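/* Wait for a queued delalloc work item to finish, then free it. */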
9459 void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)
9460 {
9461         wait_for_completion(&work->completion);
9462         kmem_cache_free(btrfs_delalloc_work_cachep, work);
9463 }
9464
9465 /*
9466  * some fairly slow code that needs optimization. This walks the list
9467  * of all the inodes with pending delalloc and forces them to disk.
9468  */
9469 static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput,
9470                                    int nr)
9471 {
9472         struct btrfs_inode *binode;
9473         struct inode *inode;
9474         struct btrfs_delalloc_work *work, *next;
9475         struct list_head works;
9476         struct list_head splice;
9477         int ret = 0;
9478
9479         INIT_LIST_HEAD(&works);
9480         INIT_LIST_HEAD(&splice);
9481
9482         mutex_lock(&root->delalloc_mutex);
9483         spin_lock(&root->delalloc_lock);
9484         list_splice_init(&root->delalloc_inodes, &splice);
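        /*
         * Work from a private copy of the delalloc list, moving each inode
         * back to the tail of root->delalloc_inodes before queueing its
         * flush work.  igrab() guards against the inode being evicted; if
         * it fails, the inode is simply skipped.
         */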
9485         while (!list_empty(&splice)) {
9486                 binode = list_entry(splice.next, struct btrfs_inode,
9487                                     delalloc_inodes);
9488
9489                 list_move_tail(&binode->delalloc_inodes,
9490                                &root->delalloc_inodes);
9491                 inode = igrab(&binode->vfs_inode);
9492                 if (!inode) {
9493                         cond_resched_lock(&root->delalloc_lock);
9494                         continue;
9495                 }
9496                 spin_unlock(&root->delalloc_lock);
9497
9498                 work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
9499                 if (!work) {
9500                         if (delay_iput)
9501                                 btrfs_add_delayed_iput(inode);
9502                         else
9503                                 iput(inode);
9504                         ret = -ENOMEM;
9505                         goto out;
9506                 }
9507                 list_add_tail(&work->list, &works);
9508                 btrfs_queue_work(root->fs_info->flush_workers,
9509                                  &work->work);
9510                 ret++;
9511                 if (nr != -1 && ret >= nr)
9512                         goto out;
9513                 cond_resched();
9514                 spin_lock(&root->delalloc_lock);
9515         }
9516         spin_unlock(&root->delalloc_lock);
9517
9518 out:
9519         list_for_each_entry_safe(work, next, &works, list) {
9520                 list_del_init(&work->list);
9521                 btrfs_wait_and_free_delalloc_work(work);
9522         }
9523
9524         if (!list_empty_careful(&splice)) {
9525                 spin_lock(&root->delalloc_lock);
9526                 list_splice_tail(&splice, &root->delalloc_inodes);
9527                 spin_unlock(&root->delalloc_lock);
9528         }
9529         mutex_unlock(&root->delalloc_mutex);
9530         return ret;
9531 }
9532
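/*
 * Kick off writeback for every delalloc inode of @root and then wait for
 * the async submission machinery to drain, so that the ordered extents
 * covering that data exist by the time we return.  Returns 0 on success,
 * -EROFS if the filesystem is in an error state, or a negative errno
 * (e.g. -ENOMEM) from queueing the flush work.
 */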
9533 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
9534 {
9535         int ret;
9536
9537         if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
9538                 return -EROFS;
9539
9540         ret = __start_delalloc_inodes(root, delay_iput, -1);
9541         if (ret > 0)
9542                 ret = 0;
9543         /*
9544          * the filemap_flush will queue IO into the worker threads, but
9545          * we have to make sure the IO is actually started and that
9546          * ordered extents get created before we return
9547          */
9548         atomic_inc(&root->fs_info->async_submit_draining);
9549         while (atomic_read(&root->fs_info->nr_async_submits) ||
9550               atomic_read(&root->fs_info->async_delalloc_pages)) {
9551                 wait_event(root->fs_info->async_submit_wait,
9552                    (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
9553                     atomic_read(&root->fs_info->async_delalloc_pages) == 0));
9554         }
9555         atomic_dec(&root->fs_info->async_submit_draining);
9556         return ret;
9557 }
9558
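/*
 * Same as btrfs_start_delalloc_inodes(), but for every root on
 * fs_info->delalloc_roots.  @nr caps the total number of inodes flushed;
 * -1 means no limit.  Each root is pinned with btrfs_grab_fs_root() while
 * its inodes are being queued and released afterwards.
 */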
9559 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput,
9560                                int nr)
9561 {
9562         struct btrfs_root *root;
9563         struct list_head splice;
9564         int ret;
9565
9566         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
9567                 return -EROFS;
9568
9569         INIT_LIST_HEAD(&splice);
9570
9571         mutex_lock(&fs_info->delalloc_root_mutex);
9572         spin_lock(&fs_info->delalloc_root_lock);
9573         list_splice_init(&fs_info->delalloc_roots, &splice);
9574         while (!list_empty(&splice) && nr) {
9575                 root = list_first_entry(&splice, struct btrfs_root,
9576                                         delalloc_root);
9577                 root = btrfs_grab_fs_root(root);
9578                 BUG_ON(!root);
9579                 list_move_tail(&root->delalloc_root,
9580                                &fs_info->delalloc_roots);
9581                 spin_unlock(&fs_info->delalloc_root_lock);
9582
9583                 ret = __start_delalloc_inodes(root, delay_iput, nr);
9584                 btrfs_put_fs_root(root);
9585                 if (ret < 0)
9586                         goto out;
9587
9588                 if (nr != -1) {
9589                         nr -= ret;
9590                         WARN_ON(nr < 0);
9591                 }
9592                 spin_lock(&fs_info->delalloc_root_lock);
9593         }
9594         spin_unlock(&fs_info->delalloc_root_lock);
9595
9596         ret = 0;
9597         atomic_inc(&fs_info->async_submit_draining);
9598         while (atomic_read(&fs_info->nr_async_submits) ||
9599               atomic_read(&fs_info->async_delalloc_pages)) {
9600                 wait_event(fs_info->async_submit_wait,
9601                    (atomic_read(&fs_info->nr_async_submits) == 0 &&
9602                     atomic_read(&fs_info->async_delalloc_pages) == 0));
9603         }
9604         atomic_dec(&fs_info->async_submit_draining);
9605 out:
9606         if (!list_empty_careful(&splice)) {
9607                 spin_lock(&fs_info->delalloc_root_lock);
9608                 list_splice_tail(&splice, &fs_info->delalloc_roots);
9609                 spin_unlock(&fs_info->delalloc_root_lock);
9610         }
9611         mutex_unlock(&fs_info->delalloc_root_mutex);
9612         return ret;
9613 }
9614
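/*
 * Symlink targets are stored as a single inline file extent at offset 0 of
 * the new inode, so the target must fit in BTRFS_MAX_INLINE_DATA_SIZE();
 * longer targets are rejected with -ENAMETOOLONG.
 */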
9615 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
9616                          const char *symname)
9617 {
9618         struct btrfs_trans_handle *trans;
9619         struct btrfs_root *root = BTRFS_I(dir)->root;
9620         struct btrfs_path *path;
9621         struct btrfs_key key;
9622         struct inode *inode = NULL;
9623         int err;
9624         int drop_inode = 0;
9625         u64 objectid;
9626         u64 index = 0;
9627         int name_len;
9628         int datasize;
9629         unsigned long ptr;
9630         struct btrfs_file_extent_item *ei;
9631         struct extent_buffer *leaf;
9632
9633         name_len = strlen(symname);
9634         if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
9635                 return -ENAMETOOLONG;
9636
9637         /*
9638          * 2 items for inode item and ref
9639          * 2 items for dir items
9640          * 1 item for xattr if selinux is on
9641          */
9642         trans = btrfs_start_transaction(root, 5);
9643         if (IS_ERR(trans))
9644                 return PTR_ERR(trans);
9645
9646         err = btrfs_find_free_ino(root, &objectid);
9647         if (err)
9648                 goto out_unlock;
9649
9650         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
9651                                 dentry->d_name.len, btrfs_ino(dir), objectid,
9652                                 S_IFLNK|S_IRWXUGO, &index);
9653         if (IS_ERR(inode)) {
9654                 err = PTR_ERR(inode);
9655                 goto out_unlock;
9656         }
9657
9658         /*
9659          * If the active LSM wants to access the inode during
9660          * d_instantiate it needs these. Smack checks to see
9661          * if the filesystem supports xattrs by looking at the
9662          * ops vector.
9663          */
9664         inode->i_fop = &btrfs_file_operations;
9665         inode->i_op = &btrfs_file_inode_operations;
9666         inode->i_mapping->a_ops = &btrfs_aops;
9667         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
9668
9669         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
9670         if (err)
9671                 goto out_unlock_inode;
9672
9673         err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
9674         if (err)
9675                 goto out_unlock_inode;
9676
9677         path = btrfs_alloc_path();
9678         if (!path) {
9679                 err = -ENOMEM;
9680                 goto out_unlock_inode;
9681         }
9682         key.objectid = btrfs_ino(inode);
9683         key.offset = 0;
9684         key.type = BTRFS_EXTENT_DATA_KEY;
9685         datasize = btrfs_file_extent_calc_inline_size(name_len);
9686         err = btrfs_insert_empty_item(trans, root, path, &key,
9687                                       datasize);
9688         if (err) {
9689                 btrfs_free_path(path);
9690                 goto out_unlock_inode;
9691         }
9692         leaf = path->nodes[0];
9693         ei = btrfs_item_ptr(leaf, path->slots[0],
9694                             struct btrfs_file_extent_item);
9695         btrfs_set_file_extent_generation(leaf, ei, trans->transid);
9696         btrfs_set_file_extent_type(leaf, ei,
9697                                    BTRFS_FILE_EXTENT_INLINE);
9698         btrfs_set_file_extent_encryption(leaf, ei, 0);
9699         btrfs_set_file_extent_compression(leaf, ei, 0);
9700         btrfs_set_file_extent_other_encoding(leaf, ei, 0);
9701         btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
9702
9703         ptr = btrfs_file_extent_inline_start(ei);
9704         write_extent_buffer(leaf, symname, ptr, name_len);
9705         btrfs_mark_buffer_dirty(leaf);
9706         btrfs_free_path(path);
9707
9708         inode->i_op = &btrfs_symlink_inode_operations;
9709         inode->i_mapping->a_ops = &btrfs_symlink_aops;
9710         inode_set_bytes(inode, name_len);
9711         btrfs_i_size_write(inode, name_len);
9712         err = btrfs_update_inode(trans, root, inode);
9713         if (err) {
9714                 drop_inode = 1;
9715                 goto out_unlock_inode;
9716         }
9717
9718         unlock_new_inode(inode);
9719         d_instantiate(dentry, inode);
9720
9721 out_unlock:
9722         btrfs_end_transaction(trans, root);
9723         if (drop_inode) {
9724                 inode_dec_link_count(inode);
9725                 iput(inode);
9726         }
9727         btrfs_btree_balance_dirty(root);
9728         return err;
9729
9730 out_unlock_inode:
9731         drop_inode = 1;
9732         unlock_new_inode(inode);
9733         goto out_unlock;
9734 }
9735
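/*
 * Allocate PREALLOC file extents covering [start, start + num_bytes).
 * Extents are reserved in chunks of at most 256MB but never smaller than
 * @min_size, and each chunk gets its own transaction unless the caller
 * passed one in.  i_size is only pushed forward when FALLOC_FL_KEEP_SIZE
 * is not set and the preallocated range extends past the current size.
 */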
9736 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
9737                                        u64 start, u64 num_bytes, u64 min_size,
9738                                        loff_t actual_len, u64 *alloc_hint,
9739                                        struct btrfs_trans_handle *trans)
9740 {
9741         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
9742         struct extent_map *em;
9743         struct btrfs_root *root = BTRFS_I(inode)->root;
9744         struct btrfs_key ins;
9745         u64 cur_offset = start;
9746         u64 i_size;
9747         u64 cur_bytes;
9748         int ret = 0;
9749         bool own_trans = true;
9750
9751         if (trans)
9752                 own_trans = false;
9753         while (num_bytes > 0) {
9754                 if (own_trans) {
9755                         trans = btrfs_start_transaction(root, 3);
9756                         if (IS_ERR(trans)) {
9757                                 ret = PTR_ERR(trans);
9758                                 break;
9759                         }
9760                 }
9761
9762                 cur_bytes = min(num_bytes, 256ULL * 1024 * 1024);
9763                 cur_bytes = max(cur_bytes, min_size);
9764                 ret = btrfs_reserve_extent(root, cur_bytes, min_size, 0,
9765                                            *alloc_hint, &ins, 1, 0);
9766                 if (ret) {
9767                         if (own_trans)
9768                                 btrfs_end_transaction(trans, root);
9769                         break;
9770                 }
9771
9772                 ret = insert_reserved_file_extent(trans, inode,
9773                                                   cur_offset, ins.objectid,
9774                                                   ins.offset, ins.offset,
9775                                                   ins.offset, 0, 0, 0,
9776                                                   BTRFS_FILE_EXTENT_PREALLOC);
9777                 if (ret) {
9778                         btrfs_free_reserved_extent(root, ins.objectid,
9779                                                    ins.offset, 0);
9780                         btrfs_abort_transaction(trans, root, ret);
9781                         if (own_trans)
9782                                 btrfs_end_transaction(trans, root);
9783                         break;
9784                 }
9785
9786                 btrfs_drop_extent_cache(inode, cur_offset,
9787                                         cur_offset + ins.offset - 1, 0);
9788
9789                 em = alloc_extent_map();
9790                 if (!em) {
9791                         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
9792                                 &BTRFS_I(inode)->runtime_flags);
9793                         goto next;
9794                 }
9795
9796                 em->start = cur_offset;
9797                 em->orig_start = cur_offset;
9798                 em->len = ins.offset;
9799                 em->block_start = ins.objectid;
9800                 em->block_len = ins.offset;
9801                 em->orig_block_len = ins.offset;
9802                 em->ram_bytes = ins.offset;
9803                 em->bdev = root->fs_info->fs_devices->latest_bdev;
9804                 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
9805                 em->generation = trans->transid;
9806
9807                 while (1) {
9808                         write_lock(&em_tree->lock);
9809                         ret = add_extent_mapping(em_tree, em, 1);
9810                         write_unlock(&em_tree->lock);
9811                         if (ret != -EEXIST)
9812                                 break;
9813                         btrfs_drop_extent_cache(inode, cur_offset,
9814                                                 cur_offset + ins.offset - 1,
9815                                                 0);
9816                 }
9817                 free_extent_map(em);
9818 next:
9819                 num_bytes -= ins.offset;
9820                 cur_offset += ins.offset;
9821                 *alloc_hint = ins.objectid + ins.offset;
9822
9823                 inode_inc_iversion(inode);
9824                 inode->i_ctime = CURRENT_TIME;
9825                 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
9826                 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
9827                     (actual_len > inode->i_size) &&
9828                     (cur_offset > inode->i_size)) {
9829                         if (cur_offset > actual_len)
9830                                 i_size = actual_len;
9831                         else
9832                                 i_size = cur_offset;
9833                         i_size_write(inode, i_size);
9834                         btrfs_ordered_update_i_size(inode, i_size, NULL);
9835                 }
9836
9837                 ret = btrfs_update_inode(trans, root, inode);
9838
9839                 if (ret) {
9840                         btrfs_abort_transaction(trans, root, ret);
9841                         if (own_trans)
9842                                 btrfs_end_transaction(trans, root);
9843                         break;
9844                 }
9845
9846                 if (own_trans)
9847                         btrfs_end_transaction(trans, root);
9848         }
9849         return ret;
9850 }
9851
9852 int btrfs_prealloc_file_range(struct inode *inode, int mode,
9853                               u64 start, u64 num_bytes, u64 min_size,
9854                               loff_t actual_len, u64 *alloc_hint)
9855 {
9856         return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
9857                                            min_size, actual_len, alloc_hint,
9858                                            NULL);
9859 }
9860
9861 int btrfs_prealloc_file_range_trans(struct inode *inode,
9862                                     struct btrfs_trans_handle *trans, int mode,
9863                                     u64 start, u64 num_bytes, u64 min_size,
9864                                     loff_t actual_len, u64 *alloc_hint)
9865 {
9866         return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
9867                                            min_size, actual_len, alloc_hint, trans);
9868 }
9869
9870 static int btrfs_set_page_dirty(struct page *page)
9871 {
9872         return __set_page_dirty_nobuffers(page);
9873 }
9874
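/*
 * For regular files, directories and symlinks, write access is refused up
 * front on read-only subvolumes (-EROFS) and on inodes flagged
 * BTRFS_INODE_READONLY (-EACCES); everything else falls through to
 * generic_permission().
 */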
9875 static int btrfs_permission(struct inode *inode, int mask)
9876 {
9877         struct btrfs_root *root = BTRFS_I(inode)->root;
9878         umode_t mode = inode->i_mode;
9879
9880         if (mask & MAY_WRITE &&
9881             (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
9882                 if (btrfs_root_readonly(root))
9883                         return -EROFS;
9884                 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
9885                         return -EACCES;
9886         }
9887         return generic_permission(inode, mask);
9888 }
9889
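/*
 * O_TMPFILE support: create an inode that has no directory entry and add
 * it to the orphan list right away, so it gets cleaned up if it is never
 * linked into the namespace.
 */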
9890 static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
9891 {
9892         struct btrfs_trans_handle *trans;
9893         struct btrfs_root *root = BTRFS_I(dir)->root;
9894         struct inode *inode = NULL;
9895         u64 objectid;
9896         u64 index;
9897         int ret = 0;
9898
9899         /*
9900          * 5 units required for adding orphan entry
9901          */
9902         trans = btrfs_start_transaction(root, 5);
9903         if (IS_ERR(trans))
9904                 return PTR_ERR(trans);
9905
9906         ret = btrfs_find_free_ino(root, &objectid);
9907         if (ret)
9908                 goto out;
9909
9910         inode = btrfs_new_inode(trans, root, dir, NULL, 0,
9911                                 btrfs_ino(dir), objectid, mode, &index);
9912         if (IS_ERR(inode)) {
9913                 ret = PTR_ERR(inode);
9914                 inode = NULL;
9915                 goto out;
9916         }
9917
9918         inode->i_fop = &btrfs_file_operations;
9919         inode->i_op = &btrfs_file_inode_operations;
9920
9921         inode->i_mapping->a_ops = &btrfs_aops;
9922         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
9923
9924         ret = btrfs_init_inode_security(trans, inode, dir, NULL);
9925         if (ret)
9926                 goto out_inode;
9927
9928         ret = btrfs_update_inode(trans, root, inode);
9929         if (ret)
9930                 goto out_inode;
9931         ret = btrfs_orphan_add(trans, inode);
9932         if (ret)
9933                 goto out_inode;
9934
9935         /*
9936          * We set number of links to 0 in btrfs_new_inode(), and here we set
9937          * it to 1 because d_tmpfile() will issue a warning if the count is 0,
9938          * through:
9939          *
9940          *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
9941          */
9942         set_nlink(inode, 1);
9943         unlock_new_inode(inode);
9944         d_tmpfile(dentry, inode);
9945         mark_inode_dirty(inode);
9946
9947 out:
9948         btrfs_end_transaction(trans, root);
9949         if (ret)
9950                 iput(inode);
9951         btrfs_balance_delayed_items(root);
9952         btrfs_btree_balance_dirty(root);
9953         return ret;
9954
9955 out_inode:
9956         unlock_new_inode(inode);
9957         goto out;
9958
9959 }
9960
9961 /* Inspired by filemap_check_errors() */
9962 int btrfs_inode_check_errors(struct inode *inode)
9963 {
9964         int ret = 0;
9965
9966         if (test_bit(AS_ENOSPC, &inode->i_mapping->flags) &&
9967             test_and_clear_bit(AS_ENOSPC, &inode->i_mapping->flags))
9968                 ret = -ENOSPC;
9969         if (test_bit(AS_EIO, &inode->i_mapping->flags) &&
9970             test_and_clear_bit(AS_EIO, &inode->i_mapping->flags))
9971                 ret = -EIO;
9972
9973         return ret;
9974 }
9975
9976 static const struct inode_operations btrfs_dir_inode_operations = {
9977         .getattr        = btrfs_getattr,
9978         .lookup         = btrfs_lookup,
9979         .create         = btrfs_create,
9980         .unlink         = btrfs_unlink,
9981         .link           = btrfs_link,
9982         .mkdir          = btrfs_mkdir,
9983         .rmdir          = btrfs_rmdir,
9984         .rename2        = btrfs_rename2,
9985         .symlink        = btrfs_symlink,
9986         .setattr        = btrfs_setattr,
9987         .mknod          = btrfs_mknod,
9988         .setxattr       = btrfs_setxattr,
9989         .getxattr       = btrfs_getxattr,
9990         .listxattr      = btrfs_listxattr,
9991         .removexattr    = btrfs_removexattr,
9992         .permission     = btrfs_permission,
9993         .get_acl        = btrfs_get_acl,
9994         .set_acl        = btrfs_set_acl,
9995         .update_time    = btrfs_update_time,
9996         .tmpfile        = btrfs_tmpfile,
9997 };
9998 static const struct inode_operations btrfs_dir_ro_inode_operations = {
9999         .lookup         = btrfs_lookup,
10000         .permission     = btrfs_permission,
10001         .get_acl        = btrfs_get_acl,
10002         .set_acl        = btrfs_set_acl,
10003         .update_time    = btrfs_update_time,
10004 };
10005
10006 static const struct file_operations btrfs_dir_file_operations = {
10007         .llseek         = generic_file_llseek,
10008         .read           = generic_read_dir,
10009         .iterate        = btrfs_real_readdir,
10010         .unlocked_ioctl = btrfs_ioctl,
10011 #ifdef CONFIG_COMPAT
10012         .compat_ioctl   = btrfs_ioctl,
10013 #endif
10014         .release        = btrfs_release_file,
10015         .fsync          = btrfs_sync_file,
10016 };
10017
10018 static struct extent_io_ops btrfs_extent_io_ops = {
10019         .fill_delalloc = run_delalloc_range,
10020         .submit_bio_hook = btrfs_submit_bio_hook,
10021         .merge_bio_hook = btrfs_merge_bio_hook,
10022         .readpage_end_io_hook = btrfs_readpage_end_io_hook,
10023         .writepage_end_io_hook = btrfs_writepage_end_io_hook,
10024         .writepage_start_hook = btrfs_writepage_start_hook,
10025         .set_bit_hook = btrfs_set_bit_hook,
10026         .clear_bit_hook = btrfs_clear_bit_hook,
10027         .merge_extent_hook = btrfs_merge_extent_hook,
10028         .split_extent_hook = btrfs_split_extent_hook,
10029 };
10030
10031 /*
10032  * btrfs doesn't support the bmap operation because swapfiles
10033  * use bmap to make a mapping of extents in the file.  They assume
10034  * these extents won't change over the life of the file and they
10035  * use the bmap result to do IO directly to the drive.
10036  *
10037  * the btrfs bmap call would return logical addresses that aren't
10038  * suitable for IO and they also will change frequently as COW
10039  * operations happen.  So, swapfile + btrfs == corruption.
10040  *
10041  * For now we're avoiding this by dropping bmap.
10042  */
10043 static const struct address_space_operations btrfs_aops = {
10044         .readpage       = btrfs_readpage,
10045         .writepage      = btrfs_writepage,
10046         .writepages     = btrfs_writepages,
10047         .readpages      = btrfs_readpages,
10048         .direct_IO      = btrfs_direct_IO,
10049         .invalidatepage = btrfs_invalidatepage,
10050         .releasepage    = btrfs_releasepage,
10051         .set_page_dirty = btrfs_set_page_dirty,
10052         .error_remove_page = generic_error_remove_page,
10053 };
10054
10055 static const struct address_space_operations btrfs_symlink_aops = {
10056         .readpage       = btrfs_readpage,
10057         .writepage      = btrfs_writepage,
10058         .invalidatepage = btrfs_invalidatepage,
10059         .releasepage    = btrfs_releasepage,
10060 };
10061
10062 static const struct inode_operations btrfs_file_inode_operations = {
10063         .getattr        = btrfs_getattr,
10064         .setattr        = btrfs_setattr,
10065         .setxattr       = btrfs_setxattr,
10066         .getxattr       = btrfs_getxattr,
10067         .listxattr      = btrfs_listxattr,
10068         .removexattr    = btrfs_removexattr,
10069         .permission     = btrfs_permission,
10070         .fiemap         = btrfs_fiemap,
10071         .get_acl        = btrfs_get_acl,
10072         .set_acl        = btrfs_set_acl,
10073         .update_time    = btrfs_update_time,
10074 };
10075 static const struct inode_operations btrfs_special_inode_operations = {
10076         .getattr        = btrfs_getattr,
10077         .setattr        = btrfs_setattr,
10078         .permission     = btrfs_permission,
10079         .setxattr       = btrfs_setxattr,
10080         .getxattr       = btrfs_getxattr,
10081         .listxattr      = btrfs_listxattr,
10082         .removexattr    = btrfs_removexattr,
10083         .get_acl        = btrfs_get_acl,
10084         .set_acl        = btrfs_set_acl,
10085         .update_time    = btrfs_update_time,
10086 };
10087 static const struct inode_operations btrfs_symlink_inode_operations = {
10088         .readlink       = generic_readlink,
10089         .follow_link    = page_follow_link_light,
10090         .put_link       = page_put_link,
10091         .getattr        = btrfs_getattr,
10092         .setattr        = btrfs_setattr,
10093         .permission     = btrfs_permission,
10094         .setxattr       = btrfs_setxattr,
10095         .getxattr       = btrfs_getxattr,
10096         .listxattr      = btrfs_listxattr,
10097         .removexattr    = btrfs_removexattr,
10098         .update_time    = btrfs_update_time,
10099 };
10100
10101 const struct dentry_operations btrfs_dentry_operations = {
10102         .d_delete       = btrfs_dentry_delete,
10103         .d_release      = btrfs_dentry_release,
10104 };