/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "volumes.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "backref.h"
#include "hash.h"
#include "props.h"
#include "qgroup.h"
#include "dedupe.h"

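/*
 * Arguments used when looking up or allocating an inode: the key of the
 * inode item and the root it belongs to.
 */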
struct btrfs_iget_args {
        struct btrfs_key *location;
        struct btrfs_root *root;
};

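/*
 * Per-call bookkeeping for direct IO: outstanding extent and space
 * reservation accounting, plus the range covered by ordered extents that
 * have been created but whose IO has not yet been submitted.
 */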
struct btrfs_dio_data {
        u64 outstanding_extents;
        u64 reserve;
        u64 unsubmitted_oe_range_start;
        u64 unsubmitted_oe_range_end;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static const struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;

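/* Map the S_IFMT bits of an inode mode to the BTRFS_FT_* dir item type. */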
#define S_SHIFT 12
static const unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
        [S_IFREG >> S_SHIFT]    = BTRFS_FT_REG_FILE,
        [S_IFDIR >> S_SHIFT]    = BTRFS_FT_DIR,
        [S_IFCHR >> S_SHIFT]    = BTRFS_FT_CHRDEV,
        [S_IFBLK >> S_SHIFT]    = BTRFS_FT_BLKDEV,
        [S_IFIFO >> S_SHIFT]    = BTRFS_FT_FIFO,
        [S_IFSOCK >> S_SHIFT]   = BTRFS_FT_SOCK,
        [S_IFLNK >> S_SHIFT]    = BTRFS_FT_SYMLINK,
};

static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
static noinline int cow_file_range(struct inode *inode,
                                   struct page *locked_page,
                                   u64 start, u64 end, u64 delalloc_end,
                                   int *page_started, unsigned long *nr_written,
                                   int unlock, struct btrfs_dedupe_hash *hash);
static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
                                           u64 len, u64 orig_start,
                                           u64 block_start, u64 block_len,
                                           u64 orig_block_len, u64 ram_bytes,
                                           int type);

static int btrfs_dirty_inode(struct inode *inode);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_inode_set_ops(struct inode *inode)
{
        BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
}
#endif

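/*
 * Set up the ACLs and security xattrs for a newly created inode,
 * inheriting defaults from the parent directory.
 */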
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
                                     struct inode *inode,  struct inode *dir,
                                     const struct qstr *qstr)
{
        int err;

        err = btrfs_init_acl(trans, inode, dir);
        if (!err)
                err = btrfs_xattr_security_init(trans, inode, dir, qstr);
        return err;
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_path *path, int extent_inserted,
                                struct btrfs_root *root, struct inode *inode,
                                u64 start, size_t size, size_t compressed_size,
                                int compress_type,
                                struct page **compressed_pages)
{
        struct extent_buffer *leaf;
        struct page *page = NULL;
        char *kaddr;
        unsigned long ptr;
        struct btrfs_file_extent_item *ei;
        int err = 0;
        int ret;
        size_t cur_size = size;
        unsigned long offset;

        if (compressed_size && compressed_pages)
                cur_size = compressed_size;

        inode_add_bytes(inode, size);

        if (!extent_inserted) {
                struct btrfs_key key;
                size_t datasize;

                key.objectid = btrfs_ino(inode);
                key.offset = start;
                key.type = BTRFS_EXTENT_DATA_KEY;

                datasize = btrfs_file_extent_calc_inline_size(cur_size);
                path->leave_spinning = 1;
                ret = btrfs_insert_empty_item(trans, root, path, &key,
                                              datasize);
                if (ret) {
                        err = ret;
                        goto fail;
                }
        }
        leaf = path->nodes[0];
        ei = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        btrfs_set_file_extent_generation(leaf, ei, trans->transid);
        btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
        btrfs_set_file_extent_encryption(leaf, ei, 0);
        btrfs_set_file_extent_other_encoding(leaf, ei, 0);
        btrfs_set_file_extent_ram_bytes(leaf, ei, size);
        ptr = btrfs_file_extent_inline_start(ei);

        if (compress_type != BTRFS_COMPRESS_NONE) {
                struct page *cpage;
                int i = 0;
                while (compressed_size > 0) {
                        cpage = compressed_pages[i];
                        cur_size = min_t(unsigned long, compressed_size,
                                       PAGE_SIZE);

                        kaddr = kmap_atomic(cpage);
                        write_extent_buffer(leaf, kaddr, ptr, cur_size);
                        kunmap_atomic(kaddr);

                        i++;
                        ptr += cur_size;
                        compressed_size -= cur_size;
                }
                btrfs_set_file_extent_compression(leaf, ei,
                                                  compress_type);
        } else {
                page = find_get_page(inode->i_mapping,
                                     start >> PAGE_SHIFT);
                btrfs_set_file_extent_compression(leaf, ei, 0);
                kaddr = kmap_atomic(page);
                offset = start & (PAGE_SIZE - 1);
                write_extent_buffer(leaf, kaddr + offset, ptr, size);
                kunmap_atomic(kaddr);
                put_page(page);
        }
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);

        /*
         * we're an inline extent, so nobody can
         * extend the file past i_size without locking
         * a page we already have locked.
         *
         * We must do any isize and inode updates
         * before we unlock the pages.  Otherwise we
         * could end up racing with unlink.
         */
        BTRFS_I(inode)->disk_i_size = inode->i_size;
        ret = btrfs_update_inode(trans, root, inode);

        return ret;
fail:
        return err;
}

/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_root *root,
                                          struct inode *inode, u64 start,
                                          u64 end, size_t compressed_size,
                                          int compress_type,
                                          struct page **compressed_pages)
{
        struct btrfs_trans_handle *trans;
        u64 isize = i_size_read(inode);
        u64 actual_end = min(end + 1, isize);
        u64 inline_len = actual_end - start;
        u64 aligned_end = ALIGN(end, root->sectorsize);
        u64 data_len = inline_len;
        int ret;
        struct btrfs_path *path;
        int extent_inserted = 0;
        u32 extent_item_size;

        if (compressed_size)
                data_len = compressed_size;

        if (start > 0 ||
            actual_end > root->sectorsize ||
            data_len > BTRFS_MAX_INLINE_DATA_SIZE(root) ||
            (!compressed_size &&
            (actual_end & (root->sectorsize - 1)) == 0) ||
            end + 1 < isize ||
            data_len > root->fs_info->max_inline) {
                return 1;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        trans = btrfs_join_transaction(root);
        if (IS_ERR(trans)) {
                btrfs_free_path(path);
                return PTR_ERR(trans);
        }
        trans->block_rsv = &root->fs_info->delalloc_block_rsv;

        if (compressed_size && compressed_pages)
                extent_item_size = btrfs_file_extent_calc_inline_size(
                   compressed_size);
        else
                extent_item_size = btrfs_file_extent_calc_inline_size(
                    inline_len);

        ret = __btrfs_drop_extents(trans, root, inode, path,
                                   start, aligned_end, NULL,
                                   1, 1, extent_item_size, &extent_inserted);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto out;
        }

        if (isize > actual_end)
                inline_len = min_t(u64, isize, actual_end);
        ret = insert_inline_extent(trans, path, extent_inserted,
                                   root, inode, start,
                                   inline_len, compressed_size,
                                   compress_type, compressed_pages);
        if (ret && ret != -ENOSPC) {
                btrfs_abort_transaction(trans, root, ret);
                goto out;
        } else if (ret == -ENOSPC) {
                ret = 1;
                goto out;
        }

        set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
        btrfs_delalloc_release_metadata(inode, end + 1 - start);
        btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
out:
        /*
         * Don't forget to free the reserved space: an inline extent
         * doesn't count as a data extent, so free it directly here.
         * At reserve time the space is always aligned to the page
         * size, so just free one page here.
         */
        btrfs_qgroup_free_data(inode, 0, PAGE_SIZE);
        btrfs_free_path(path);
        btrfs_end_transaction(trans, root);
        return ret;
}

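/*
 * A single range handed from the compression phase to the submission
 * phase.  pages holds the compressed data; a NULL pages array means the
 * range fell back to uncompressed IO.
 */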
struct async_extent {
        u64 start;
        u64 ram_size;
        u64 compressed_size;
        struct page **pages;
        unsigned long nr_pages;
        int compress_type;
        struct list_head list;
};

struct async_cow {
        struct inode *inode;
        struct btrfs_root *root;
        struct page *locked_page;
        u64 start;
        u64 end;
        struct list_head extents;
        struct btrfs_work work;
};

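/*
 * Queue a range on the async_cow work item so the submission phase can
 * write it out in order.
 */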
static noinline int add_async_extent(struct async_cow *cow,
                                     u64 start, u64 ram_size,
                                     u64 compressed_size,
                                     struct page **pages,
                                     unsigned long nr_pages,
                                     int compress_type)
{
        struct async_extent *async_extent;

        async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
        BUG_ON(!async_extent); /* -ENOMEM */
        async_extent->start = start;
        async_extent->ram_size = ram_size;
        async_extent->compressed_size = compressed_size;
        async_extent->pages = pages;
        async_extent->nr_pages = nr_pages;
        async_extent->compress_type = compress_type;
        list_add_tail(&async_extent->list, &cow->extents);
        return 0;
}

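/*
 * Decide whether writes to this inode should go through the compression
 * path: forced on by mount option or per-inode property, and turned off
 * once an inode has been flagged for bad compression ratios.
 */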
static inline int inode_need_compress(struct inode *inode)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;

        /* force compress */
        if (btrfs_test_opt(root->fs_info, FORCE_COMPRESS))
                return 1;
        /* bad compression ratios */
        if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
                return 0;
        if (btrfs_test_opt(root->fs_info, COMPRESS) ||
            BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS ||
            BTRFS_I(inode)->force_compress)
                return 1;
        return 0;
}

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that the flusher thread sent them
 * down.
 */
static noinline void compress_file_range(struct inode *inode,
                                        struct page *locked_page,
                                        u64 start, u64 end,
                                        struct async_cow *async_cow,
                                        int *num_added)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        u64 num_bytes;
        u64 blocksize = root->sectorsize;
        u64 actual_end;
        u64 isize = i_size_read(inode);
        int ret = 0;
        struct page **pages = NULL;
        unsigned long nr_pages;
        unsigned long nr_pages_ret = 0;
        unsigned long total_compressed = 0;
        unsigned long total_in = 0;
        unsigned long max_compressed = SZ_128K;
        unsigned long max_uncompressed = SZ_128K;
        int i;
        int will_compress;
        int compress_type = root->fs_info->compress_type;
        int redirty = 0;

        /* if this is a small write inside eof, kick off a defrag */
        if ((end - start + 1) < SZ_16K &&
            (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
                btrfs_add_inode_defrag(NULL, inode);

        actual_end = min_t(u64, isize, end + 1);
again:
        will_compress = 0;
        nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
        nr_pages = min_t(unsigned long, nr_pages, SZ_128K / PAGE_SIZE);

        /*
         * we don't want to send crud past the end of i_size through
         * compression, that's just a waste of CPU time.  So, if the
         * end of the file is before the start of our current
         * requested range of bytes, we bail out to the uncompressed
         * cleanup code that can deal with all of this.
         *
         * It isn't really the fastest way to fix things, but this is a
         * very uncommon corner.
         */
        if (actual_end <= start)
                goto cleanup_and_bail_uncompressed;

        total_compressed = actual_end - start;

        /*
         * skip compression for a small file range (<= blocksize) that
         * isn't an inline extent, since it doesn't save disk space at all.
         */
        if (total_compressed <= blocksize &&
           (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
                goto cleanup_and_bail_uncompressed;

        /* we want to make sure that amount of ram required to uncompress
         * an extent is reasonable, so we limit the total size in ram
         * of a compressed extent to 128k.  This is a crucial number
         * because it also controls how easily we can spread reads across
         * cpus for decompression.
         *
         * We also want to make sure the amount of IO required to do
         * a random read is reasonably small, so we limit the size of
         * a compressed extent to 128k.
         */
        total_compressed = min(total_compressed, max_uncompressed);
        num_bytes = ALIGN(end - start + 1, blocksize);
        num_bytes = max(blocksize,  num_bytes);
        total_in = 0;
        ret = 0;

        /*
         * we do compression for mount -o compress and when the
         * inode has not been flagged as nocompress.  This flag can
         * change at any time if we discover bad compression ratios.
         */
        if (inode_need_compress(inode)) {
                WARN_ON(pages);
                pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
                if (!pages) {
                        /* just bail out to the uncompressed code */
                        goto cont;
                }

                if (BTRFS_I(inode)->force_compress)
                        compress_type = BTRFS_I(inode)->force_compress;

                /*
                 * we need to call clear_page_dirty_for_io on each
                 * page in the range.  Otherwise applications with the file
                 * mmap'd can wander in and change the page contents while
                 * we are compressing them.
                 *
                 * If the compression fails for any reason, we set the pages
                 * dirty again later on.
                 */
                extent_range_clear_dirty_for_io(inode, start, end);
                redirty = 1;
                ret = btrfs_compress_pages(compress_type,
                                           inode->i_mapping, start,
                                           total_compressed, pages,
                                           nr_pages, &nr_pages_ret,
                                           &total_in,
                                           &total_compressed,
                                           max_compressed);

                if (!ret) {
                        unsigned long offset = total_compressed &
                                (PAGE_SIZE - 1);
                        struct page *page = pages[nr_pages_ret - 1];
                        char *kaddr;

                        /* zero the tail end of the last page, we might be
                         * sending it down to disk
                         */
                        if (offset) {
                                kaddr = kmap_atomic(page);
                                memset(kaddr + offset, 0,
                                       PAGE_SIZE - offset);
                                kunmap_atomic(kaddr);
                        }
                        will_compress = 1;
                }
        }
cont:
        if (start == 0) {
                /* let's try to make an inline extent */
                if (ret || total_in < (actual_end - start)) {
                        /* we didn't compress the entire range, try
                         * to make an uncompressed inline extent.
                         */
                        ret = cow_file_range_inline(root, inode, start, end,
                                                    0, 0, NULL);
                } else {
                        /* try making a compressed inline extent */
                        ret = cow_file_range_inline(root, inode, start, end,
                                                    total_compressed,
                                                    compress_type, pages);
                }
                if (ret <= 0) {
                        unsigned long clear_flags = EXTENT_DELALLOC |
                                EXTENT_DEFRAG;
                        unsigned long page_error_op;

                        clear_flags |= (ret < 0) ? EXTENT_DO_ACCOUNTING : 0;
                        page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;

                        /*
                         * inline extent creation worked or returned error,
                         * we don't need to create any more async work items.
                         * Unlock and free up our temp pages.
                         */
                        extent_clear_unlock_delalloc(inode, start, end, NULL,
                                                     clear_flags, PAGE_UNLOCK |
                                                     PAGE_CLEAR_DIRTY |
                                                     PAGE_SET_WRITEBACK |
                                                     page_error_op |
                                                     PAGE_END_WRITEBACK);
                        goto free_pages_out;
                }
        }

        if (will_compress) {
                /*
                 * we aren't doing an inline extent, so round the compressed
                 * size up to a block size boundary so the allocator does
                 * sane things
                 */
                total_compressed = ALIGN(total_compressed, blocksize);

                /*
                 * one last check to make sure the compression is really a
                 * win, compare the page count read with the blocks on disk
                 */
                total_in = ALIGN(total_in, PAGE_SIZE);
                if (total_compressed >= total_in) {
                        will_compress = 0;
                } else {
                        num_bytes = total_in;
                        *num_added += 1;

                        /*
                         * The async work queues will take care of doing actual
                         * allocation on disk for these compressed pages, and
                         * will submit them to the elevator.
                         */
                        add_async_extent(async_cow, start, num_bytes,
                                        total_compressed, pages, nr_pages_ret,
                                        compress_type);

                        if (start + num_bytes < end) {
                                start += num_bytes;
                                pages = NULL;
                                cond_resched();
                                goto again;
                        }
                        return;
                }
        }
        if (pages) {
                /*
                 * the compression code ran but failed to make things smaller,
                 * free any pages it allocated and our page pointer array
                 */
                for (i = 0; i < nr_pages_ret; i++) {
                        WARN_ON(pages[i]->mapping);
                        put_page(pages[i]);
                }
                kfree(pages);
                pages = NULL;
                total_compressed = 0;
                nr_pages_ret = 0;

                /* flag the file so we don't compress in the future */
                if (!btrfs_test_opt(root->fs_info, FORCE_COMPRESS) &&
                    !(BTRFS_I(inode)->force_compress)) {
                        BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
                }
        }
cleanup_and_bail_uncompressed:
        /*
         * No compression, but we still need to write the pages in the file
         * we've been given so far.  redirty the locked page if it corresponds
         * to our extent and set things up for the async work queue to run
         * cow_file_range to do the normal delalloc dance.
         */
        if (page_offset(locked_page) >= start &&
            page_offset(locked_page) <= end)
                __set_page_dirty_nobuffers(locked_page);
                /* unlocked later on in the async handlers */

        if (redirty)
                extent_range_redirty_for_io(inode, start, end);
        add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0,
                         BTRFS_COMPRESS_NONE);
        *num_added += 1;

        return;

free_pages_out:
        for (i = 0; i < nr_pages_ret; i++) {
                WARN_ON(pages[i]->mapping);
                put_page(pages[i]);
        }
        kfree(pages);
}

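/* Drop the page references held by an async_extent and forget its pages. */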
static void free_async_extent_pages(struct async_extent *async_extent)
{
        int i;

        if (!async_extent->pages)
                return;

        for (i = 0; i < async_extent->nr_pages; i++) {
                WARN_ON(async_extent->pages[i]->mapping);
                put_page(async_extent->pages[i]);
        }
        kfree(async_extent->pages);
        async_extent->nr_pages = 0;
        async_extent->pages = NULL;
}

/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline void submit_compressed_extents(struct inode *inode,
                                              struct async_cow *async_cow)
{
        struct async_extent *async_extent;
        u64 alloc_hint = 0;
        struct btrfs_key ins;
        struct extent_map *em;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_io_tree *io_tree;
        int ret = 0;

again:
        while (!list_empty(&async_cow->extents)) {
                async_extent = list_entry(async_cow->extents.next,
                                          struct async_extent, list);
                list_del(&async_extent->list);

                io_tree = &BTRFS_I(inode)->io_tree;

retry:
                /* did the compression code fall back to uncompressed IO? */
                if (!async_extent->pages) {
                        int page_started = 0;
                        unsigned long nr_written = 0;

                        lock_extent(io_tree, async_extent->start,
                                         async_extent->start +
                                         async_extent->ram_size - 1);

                        /* allocate blocks */
                        ret = cow_file_range(inode, async_cow->locked_page,
                                             async_extent->start,
                                             async_extent->start +
                                             async_extent->ram_size - 1,
                                             async_extent->start +
                                             async_extent->ram_size - 1,
                                             &page_started, &nr_written, 0,
                                             NULL);

                        /* JDM XXX */

                        /*
                         * if page_started, cow_file_range inserted an
                         * inline extent and took care of all the unlocking
                         * and IO for us.  Otherwise, we need to submit
                         * all those pages down to the drive.
                         */
                        if (!page_started && !ret)
                                extent_write_locked_range(io_tree,
                                                  inode, async_extent->start,
                                                  async_extent->start +
                                                  async_extent->ram_size - 1,
                                                  btrfs_get_extent,
                                                  WB_SYNC_ALL);
                        else if (ret)
                                unlock_page(async_cow->locked_page);
                        kfree(async_extent);
                        cond_resched();
                        continue;
                }

                lock_extent(io_tree, async_extent->start,
                            async_extent->start + async_extent->ram_size - 1);

                ret = btrfs_reserve_extent(root,
                                           async_extent->compressed_size,
                                           async_extent->compressed_size,
                                           0, alloc_hint, &ins, 1, 1);
                if (ret) {
                        free_async_extent_pages(async_extent);

                        if (ret == -ENOSPC) {
                                unlock_extent(io_tree, async_extent->start,
                                              async_extent->start +
                                              async_extent->ram_size - 1);

                                /*
                                 * we need to redirty the pages if we decide to
                                 * fall back to uncompressed IO, otherwise we
                                 * will not submit these pages down to lower
                                 * layers.
                                 */
                                extent_range_redirty_for_io(inode,
                                                async_extent->start,
                                                async_extent->start +
                                                async_extent->ram_size - 1);

                                goto retry;
                        }
                        goto out_free;
                }
                /*
                 * here we're doing allocation and writeback of the
                 * compressed pages
                 */
                btrfs_drop_extent_cache(inode, async_extent->start,
                                        async_extent->start +
                                        async_extent->ram_size - 1, 0);

                em = alloc_extent_map();
                if (!em) {
                        ret = -ENOMEM;
                        goto out_free_reserve;
                }
                em->start = async_extent->start;
                em->len = async_extent->ram_size;
                em->orig_start = em->start;
                em->mod_start = em->start;
                em->mod_len = em->len;

                em->block_start = ins.objectid;
                em->block_len = ins.offset;
                em->orig_block_len = ins.offset;
                em->ram_bytes = async_extent->ram_size;
                em->bdev = root->fs_info->fs_devices->latest_bdev;
                em->compress_type = async_extent->compress_type;
                set_bit(EXTENT_FLAG_PINNED, &em->flags);
                set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                em->generation = -1;

                while (1) {
                        write_lock(&em_tree->lock);
                        ret = add_extent_mapping(em_tree, em, 1);
                        write_unlock(&em_tree->lock);
                        if (ret != -EEXIST) {
                                free_extent_map(em);
                                break;
                        }
                        btrfs_drop_extent_cache(inode, async_extent->start,
                                                async_extent->start +
                                                async_extent->ram_size - 1, 0);
                }

                if (ret)
                        goto out_free_reserve;

                ret = btrfs_add_ordered_extent_compress(inode,
                                                async_extent->start,
                                                ins.objectid,
                                                async_extent->ram_size,
                                                ins.offset,
                                                BTRFS_ORDERED_COMPRESSED,
                                                async_extent->compress_type);
                if (ret) {
                        btrfs_drop_extent_cache(inode, async_extent->start,
                                                async_extent->start +
                                                async_extent->ram_size - 1, 0);
                        goto out_free_reserve;
                }
                btrfs_dec_block_group_reservations(root->fs_info, ins.objectid);

                /*
                 * clear dirty, set writeback and unlock the pages.
                 */
                extent_clear_unlock_delalloc(inode, async_extent->start,
                                async_extent->start +
                                async_extent->ram_size - 1,
                                NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
                                PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
                                PAGE_SET_WRITEBACK);
                ret = btrfs_submit_compressed_write(inode,
                                    async_extent->start,
                                    async_extent->ram_size,
                                    ins.objectid,
                                    ins.offset, async_extent->pages,
                                    async_extent->nr_pages);
                if (ret) {
                        struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
                        struct page *p = async_extent->pages[0];
                        const u64 start = async_extent->start;
                        const u64 end = start + async_extent->ram_size - 1;

                        p->mapping = inode->i_mapping;
                        tree->ops->writepage_end_io_hook(p, start, end,
                                                         NULL, 0);
                        p->mapping = NULL;
                        extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
                                                     PAGE_END_WRITEBACK |
                                                     PAGE_SET_ERROR);
                        free_async_extent_pages(async_extent);
                }
                alloc_hint = ins.objectid + ins.offset;
                kfree(async_extent);
                cond_resched();
        }
        return;
out_free_reserve:
        btrfs_dec_block_group_reservations(root->fs_info, ins.objectid);
        btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
out_free:
        extent_clear_unlock_delalloc(inode, async_extent->start,
                                     async_extent->start +
                                     async_extent->ram_size - 1,
                                     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
                                     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
                                     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
                                     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
                                     PAGE_SET_ERROR);
        free_async_extent_pages(async_extent);
        kfree(async_extent);
        goto again;
}

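/*
 * Look at the extent map covering @start to suggest a disk block to
 * allocate near, falling back to the first mapped block in the file.
 */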
static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
                                      u64 num_bytes)
{
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_map *em;
        u64 alloc_hint = 0;

        read_lock(&em_tree->lock);
        em = search_extent_mapping(em_tree, start, num_bytes);
        if (em) {
                /*
                 * if block start isn't an actual block number then find the
                 * first block in this inode and use that as a hint.  If that
                 * block is also bogus then just don't worry about it.
                 */
                if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
                        free_extent_map(em);
                        em = search_extent_mapping(em_tree, 0, 0);
                        if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
                                alloc_hint = em->block_start;
                        if (em)
                                free_extent_map(em);
                } else {
                        alloc_hint = em->block_start;
                        free_extent_map(em);
                }
        }
        read_unlock(&em_tree->lock);

        return alloc_hint;
}

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the callbacks end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
                                   struct page *locked_page,
                                   u64 start, u64 end, u64 delalloc_end,
                                   int *page_started, unsigned long *nr_written,
                                   int unlock, struct btrfs_dedupe_hash *hash)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        u64 alloc_hint = 0;
        u64 num_bytes;
        unsigned long ram_size;
        u64 disk_num_bytes;
        u64 cur_alloc_size;
        u64 blocksize = root->sectorsize;
        struct btrfs_key ins;
        struct extent_map *em;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        int ret = 0;

        if (btrfs_is_free_space_inode(inode)) {
                WARN_ON_ONCE(1);
                ret = -EINVAL;
                goto out_unlock;
        }

        num_bytes = ALIGN(end - start + 1, blocksize);
        num_bytes = max(blocksize,  num_bytes);
        disk_num_bytes = num_bytes;

        /* if this is a small write inside eof, kick off defrag */
        if (num_bytes < SZ_64K &&
            (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
                btrfs_add_inode_defrag(NULL, inode);

        if (start == 0) {
                /* let's try to make an inline extent */
                ret = cow_file_range_inline(root, inode, start, end, 0, 0,
                                            NULL);
                if (ret == 0) {
                        extent_clear_unlock_delalloc(inode, start, end, NULL,
                                     EXTENT_LOCKED | EXTENT_DELALLOC |
                                     EXTENT_DEFRAG, PAGE_UNLOCK |
                                     PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
                                     PAGE_END_WRITEBACK);

                        *nr_written = *nr_written +
                             (end - start + PAGE_SIZE) / PAGE_SIZE;
                        *page_started = 1;
                        goto out;
                } else if (ret < 0) {
                        goto out_unlock;
                }
        }

        BUG_ON(disk_num_bytes >
               btrfs_super_total_bytes(root->fs_info->super_copy));

        alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
        btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

        while (disk_num_bytes > 0) {
                unsigned long op;

                cur_alloc_size = disk_num_bytes;
                ret = btrfs_reserve_extent(root, cur_alloc_size,
                                           root->sectorsize, 0, alloc_hint,
                                           &ins, 1, 1);
                if (ret < 0)
                        goto out_unlock;

                em = alloc_extent_map();
                if (!em) {
                        ret = -ENOMEM;
                        goto out_reserve;
                }
                em->start = start;
                em->orig_start = em->start;
                ram_size = ins.offset;
                em->len = ins.offset;
                em->mod_start = em->start;
                em->mod_len = em->len;

                em->block_start = ins.objectid;
                em->block_len = ins.offset;
                em->orig_block_len = ins.offset;
                em->ram_bytes = ram_size;
                em->bdev = root->fs_info->fs_devices->latest_bdev;
                set_bit(EXTENT_FLAG_PINNED, &em->flags);
                em->generation = -1;

                while (1) {
                        write_lock(&em_tree->lock);
                        ret = add_extent_mapping(em_tree, em, 1);
                        write_unlock(&em_tree->lock);
                        if (ret != -EEXIST) {
                                free_extent_map(em);
                                break;
                        }
                        btrfs_drop_extent_cache(inode, start,
                                                start + ram_size - 1, 0);
                }
                if (ret)
                        goto out_reserve;

                cur_alloc_size = ins.offset;
                ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
                                               ram_size, cur_alloc_size, 0);
                if (ret)
                        goto out_drop_extent_cache;

                if (root->root_key.objectid ==
                    BTRFS_DATA_RELOC_TREE_OBJECTID) {
                        ret = btrfs_reloc_clone_csums(inode, start,
                                                      cur_alloc_size);
                        if (ret)
                                goto out_drop_extent_cache;
                }

                btrfs_dec_block_group_reservations(root->fs_info, ins.objectid);

                if (disk_num_bytes < cur_alloc_size)
                        break;

                /* we're not doing compressed IO, don't unlock the first
                 * page (which the caller expects to stay locked), don't
                 * clear any dirty bits and don't set any writeback bits
                 *
                 * Do set the Private2 bit so we know this page was properly
                 * setup for writepage
                 */
                op = unlock ? PAGE_UNLOCK : 0;
                op |= PAGE_SET_PRIVATE2;

                extent_clear_unlock_delalloc(inode, start,
                                             start + ram_size - 1, locked_page,
                                             EXTENT_LOCKED | EXTENT_DELALLOC,
                                             op);
                disk_num_bytes -= cur_alloc_size;
                num_bytes -= cur_alloc_size;
                alloc_hint = ins.objectid + ins.offset;
                start += cur_alloc_size;
        }
out:
        return ret;

out_drop_extent_cache:
        btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0);
out_reserve:
        btrfs_dec_block_group_reservations(root->fs_info, ins.objectid);
        btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
out_unlock:
        extent_clear_unlock_delalloc(inode, start, end, locked_page,
                                     EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
                                     EXTENT_DELALLOC | EXTENT_DEFRAG,
                                     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
                                     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK);
        goto out;
}

/*
 * work queue callback to start compression on a file's pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
        struct async_cow *async_cow;
        int num_added = 0;
        async_cow = container_of(work, struct async_cow, work);

        compress_file_range(async_cow->inode, async_cow->locked_page,
                            async_cow->start, async_cow->end, async_cow,
                            &num_added);
        if (num_added == 0) {
                btrfs_add_delayed_iput(async_cow->inode);
                async_cow->inode = NULL;
        }
}

/*
 * work queue callback to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
        struct async_cow *async_cow;
        struct btrfs_root *root;
        unsigned long nr_pages;

        async_cow = container_of(work, struct async_cow, work);

        root = async_cow->root;
        nr_pages = (async_cow->end - async_cow->start + PAGE_SIZE) >>
                PAGE_SHIFT;

        /*
         * atomic_sub_return implies a barrier for waitqueue_active
         */
        if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
            5 * SZ_1M &&
            waitqueue_active(&root->fs_info->async_submit_wait))
                wake_up(&root->fs_info->async_submit_wait);

        if (async_cow->inode)
                submit_compressed_extents(async_cow->inode, async_cow);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
        struct async_cow *async_cow;
        async_cow = container_of(work, struct async_cow, work);
        if (async_cow->inode)
                btrfs_add_delayed_iput(async_cow->inode);
        kfree(async_cow);
}

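/*
 * Kick off the delalloc range as background work: split it into chunks of
 * up to 512K (or the whole range if compression is disabled for this
 * inode), queue one async_cow item per chunk on the delalloc workers and
 * throttle the caller once too many async delalloc pages are in flight.
 */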
static int cow_file_range_async(struct inode *inode, struct page *locked_page,
                                u64 start, u64 end, int *page_started,
                                unsigned long *nr_written)
{
        struct async_cow *async_cow;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        unsigned long nr_pages;
        u64 cur_end;
        int limit = 10 * SZ_1M;

        clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
                         1, 0, NULL, GFP_NOFS);
        while (start < end) {
                async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
                BUG_ON(!async_cow); /* -ENOMEM */
                async_cow->inode = igrab(inode);
                async_cow->root = root;
                async_cow->locked_page = locked_page;
                async_cow->start = start;

                if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
                    !btrfs_test_opt(root->fs_info, FORCE_COMPRESS))
                        cur_end = end;
                else
                        cur_end = min(end, start + SZ_512K - 1);

                async_cow->end = cur_end;
                INIT_LIST_HEAD(&async_cow->extents);

                btrfs_init_work(&async_cow->work,
                                btrfs_delalloc_helper,
                                async_cow_start, async_cow_submit,
                                async_cow_free);

                nr_pages = (cur_end - start + PAGE_SIZE) >>
                        PAGE_SHIFT;
                atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

                btrfs_queue_work(root->fs_info->delalloc_workers,
                                 &async_cow->work);

                if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
                        wait_event(root->fs_info->async_submit_wait,
                           (atomic_read(&root->fs_info->async_delalloc_pages) <
                            limit));
                }

                while (atomic_read(&root->fs_info->async_submit_draining) &&
                      atomic_read(&root->fs_info->async_delalloc_pages)) {
                        wait_event(root->fs_info->async_submit_wait,
                          (atomic_read(&root->fs_info->async_delalloc_pages) ==
                           0));
                }

                *nr_written += nr_pages;
                start = cur_end + 1;
        }
        *page_started = 1;
        return 0;
}

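/*
 * Check whether any checksum items exist for the given disk byte range;
 * returns 1 if so (freeing the looked-up sums), 0 if the range has no
 * checksums.
 */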
static noinline int csum_exist_in_range(struct btrfs_root *root,
                                        u64 bytenr, u64 num_bytes)
{
        int ret;
        struct btrfs_ordered_sum *sums;
        LIST_HEAD(list);

        ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
                                       bytenr + num_bytes - 1, &list, 0);
        if (ret == 0 && list_empty(&list))
                return 0;

        while (!list_empty(&list)) {
                sums = list_entry(list.next, struct btrfs_ordered_sum, list);
                list_del(&sums->list);
                kfree(sums);
        }
        return 1;
}

/*
 * called for the nocow writeback path.  This checks for snapshots or COW
 * copies of the extents that exist in the file, and COWs the file as
 * required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
                                       struct page *locked_page,
                              u64 start, u64 end, int *page_started, int force,
                              unsigned long *nr_written)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        struct extent_buffer *leaf;
        struct btrfs_path *path;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key found_key;
        u64 cow_start;
        u64 cur_offset;
        u64 extent_end;
        u64 extent_offset;
        u64 disk_bytenr;
        u64 num_bytes;
        u64 disk_num_bytes;
        u64 ram_bytes;
        int extent_type;
        int ret, err;
        int type;
        int nocow;
        int check_prev = 1;
        bool nolock;
        u64 ino = btrfs_ino(inode);

        path = btrfs_alloc_path();
        if (!path) {
                extent_clear_unlock_delalloc(inode, start, end, locked_page,
                                             EXTENT_LOCKED | EXTENT_DELALLOC |
                                             EXTENT_DO_ACCOUNTING |
                                             EXTENT_DEFRAG, PAGE_UNLOCK |
                                             PAGE_CLEAR_DIRTY |
                                             PAGE_SET_WRITEBACK |
                                             PAGE_END_WRITEBACK);
                return -ENOMEM;
        }

        nolock = btrfs_is_free_space_inode(inode);

        if (nolock)
                trans = btrfs_join_transaction_nolock(root);
        else
                trans = btrfs_join_transaction(root);

        if (IS_ERR(trans)) {
                extent_clear_unlock_delalloc(inode, start, end, locked_page,
                                             EXTENT_LOCKED | EXTENT_DELALLOC |
                                             EXTENT_DO_ACCOUNTING |
                                             EXTENT_DEFRAG, PAGE_UNLOCK |
                                             PAGE_CLEAR_DIRTY |
                                             PAGE_SET_WRITEBACK |
                                             PAGE_END_WRITEBACK);
                btrfs_free_path(path);
                return PTR_ERR(trans);
        }

        trans->block_rsv = &root->fs_info->delalloc_block_rsv;

        cow_start = (u64)-1;
        cur_offset = start;
        while (1) {
                ret = btrfs_lookup_file_extent(trans, root, path, ino,
                                               cur_offset, 0);
                if (ret < 0)
                        goto error;
                if (ret > 0 && path->slots[0] > 0 && check_prev) {
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0] - 1);
                        if (found_key.objectid == ino &&
                            found_key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }
                check_prev = 0;
next_slot:
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                goto error;
                        if (ret > 0)
                                break;
                        leaf = path->nodes[0];
                }

                nocow = 0;
                disk_bytenr = 0;
                num_bytes = 0;
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

                if (found_key.objectid > ino)
                        break;
                if (WARN_ON_ONCE(found_key.objectid < ino) ||
                    found_key.type < BTRFS_EXTENT_DATA_KEY) {
                        path->slots[0]++;
                        goto next_slot;
                }
                if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
                    found_key.offset > end)
                        break;

                if (found_key.offset > cur_offset) {
                        extent_end = found_key.offset;
                        extent_type = 0;
                        goto out_check;
                }

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);
                extent_type = btrfs_file_extent_type(leaf, fi);

                ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
                if (extent_type == BTRFS_FILE_EXTENT_REG ||
                    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
                        disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
                        extent_offset = btrfs_file_extent_offset(leaf, fi);
                        extent_end = found_key.offset +
                                btrfs_file_extent_num_bytes(leaf, fi);
                        disk_num_bytes =
                                btrfs_file_extent_disk_num_bytes(leaf, fi);
                        if (extent_end <= start) {
                                path->slots[0]++;
                                goto next_slot;
                        }
                        if (disk_bytenr == 0)
                                goto out_check;
                        if (btrfs_file_extent_compression(leaf, fi) ||
1358                             btrfs_file_extent_encryption(leaf, fi) ||
1359                             btrfs_file_extent_other_encoding(leaf, fi))
1360                                 goto out_check;
1361                         if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
1362                                 goto out_check;
1363                         if (btrfs_extent_readonly(root, disk_bytenr))
1364                                 goto out_check;
1365                         if (btrfs_cross_ref_exist(trans, root, ino,
1366                                                   found_key.offset -
1367                                                   extent_offset, disk_bytenr))
1368                                 goto out_check;
1369                         disk_bytenr += extent_offset;
1370                         disk_bytenr += cur_offset - found_key.offset;
1371                         num_bytes = min(end + 1, extent_end) - cur_offset;
1372                         /*
1373                          * if there are pending snapshots for this root,
1374                          * we fall back to the common COW path.
1375                          */
1376                         if (!nolock) {
1377                                 err = btrfs_start_write_no_snapshoting(root);
1378                                 if (!err)
1379                                         goto out_check;
1380                         }
1381                         /*
1382                          * force COW if csums exist in the range.
1383                          * this ensures that the csums for a given extent
1384                          * are either valid or do not exist.
1385                          */
1386                         if (csum_exist_in_range(root, disk_bytenr, num_bytes))
1387                                 goto out_check;
1388                         if (!btrfs_inc_nocow_writers(root->fs_info,
1389                                                      disk_bytenr))
1390                                 goto out_check;
1391                         nocow = 1;
1392                 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1393                         extent_end = found_key.offset +
1394                                 btrfs_file_extent_inline_len(leaf,
1395                                                      path->slots[0], fi);
1396                         extent_end = ALIGN(extent_end, root->sectorsize);
1397                 } else {
1398                         BUG_ON(1);
1399                 }
1400 out_check:
1401                 if (extent_end <= start) {
1402                         path->slots[0]++;
1403                         if (!nolock && nocow)
1404                                 btrfs_end_write_no_snapshoting(root);
1405                         if (nocow)
1406                                 btrfs_dec_nocow_writers(root->fs_info,
1407                                                         disk_bytenr);
1408                         goto next_slot;
1409                 }
1410                 if (!nocow) {
1411                         if (cow_start == (u64)-1)
1412                                 cow_start = cur_offset;
1413                         cur_offset = extent_end;
1414                         if (cur_offset > end)
1415                                 break;
1416                         path->slots[0]++;
1417                         goto next_slot;
1418                 }
1419
1420                 btrfs_release_path(path);
1421                 if (cow_start != (u64)-1) {
1422                         ret = cow_file_range(inode, locked_page,
1423                                              cow_start, found_key.offset - 1,
1424                                              end, page_started, nr_written, 1,
1425                                              NULL);
1426                         if (ret) {
1427                                 if (!nolock && nocow)
1428                                         btrfs_end_write_no_snapshoting(root);
1429                                 if (nocow)
1430                                         btrfs_dec_nocow_writers(root->fs_info,
1431                                                                 disk_bytenr);
1432                                 goto error;
1433                         }
1434                         cow_start = (u64)-1;
1435                 }
1436
1437                 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1438                         struct extent_map *em;
1439                         struct extent_map_tree *em_tree;
1440                         em_tree = &BTRFS_I(inode)->extent_tree;
1441                         em = alloc_extent_map();
1442                         BUG_ON(!em); /* -ENOMEM */
1443                         em->start = cur_offset;
1444                         em->orig_start = found_key.offset - extent_offset;
1445                         em->len = num_bytes;
1446                         em->block_len = num_bytes;
1447                         em->block_start = disk_bytenr;
1448                         em->orig_block_len = disk_num_bytes;
1449                         em->ram_bytes = ram_bytes;
1450                         em->bdev = root->fs_info->fs_devices->latest_bdev;
1451                         em->mod_start = em->start;
1452                         em->mod_len = em->len;
1453                         set_bit(EXTENT_FLAG_PINNED, &em->flags);
1454                         set_bit(EXTENT_FLAG_FILLING, &em->flags);
1455                         em->generation = -1;
1456                         while (1) {
1457                                 write_lock(&em_tree->lock);
1458                                 ret = add_extent_mapping(em_tree, em, 1);
1459                                 write_unlock(&em_tree->lock);
1460                                 if (ret != -EEXIST) {
1461                                         free_extent_map(em);
1462                                         break;
1463                                 }
1464                                 btrfs_drop_extent_cache(inode, em->start,
1465                                                 em->start + em->len - 1, 0);
1466                         }
1467                         type = BTRFS_ORDERED_PREALLOC;
1468                 } else {
1469                         type = BTRFS_ORDERED_NOCOW;
1470                 }
1471
1472                 ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1473                                                num_bytes, num_bytes, type);
1474                 if (nocow)
1475                         btrfs_dec_nocow_writers(root->fs_info, disk_bytenr);
1476                 BUG_ON(ret); /* -ENOMEM */
1477
1478                 if (root->root_key.objectid ==
1479                     BTRFS_DATA_RELOC_TREE_OBJECTID) {
1480                         ret = btrfs_reloc_clone_csums(inode, cur_offset,
1481                                                       num_bytes);
1482                         if (ret) {
1483                                 if (!nolock && nocow)
1484                                         btrfs_end_write_no_snapshoting(root);
1485                                 goto error;
1486                         }
1487                 }
1488
1489                 extent_clear_unlock_delalloc(inode, cur_offset,
1490                                              cur_offset + num_bytes - 1,
1491                                              locked_page, EXTENT_LOCKED |
1492                                              EXTENT_DELALLOC, PAGE_UNLOCK |
1493                                              PAGE_SET_PRIVATE2);
1494                 if (!nolock && nocow)
1495                         btrfs_end_write_no_snapshoting(root);
1496                 cur_offset = extent_end;
1497                 if (cur_offset > end)
1498                         break;
1499         }
1500         btrfs_release_path(path);
1501
1502         if (cur_offset <= end && cow_start == (u64)-1) {
1503                 cow_start = cur_offset;
1504                 cur_offset = end;
1505         }
1506
1507         if (cow_start != (u64)-1) {
1508                 ret = cow_file_range(inode, locked_page, cow_start, end, end,
1509                                      page_started, nr_written, 1, NULL);
1510                 if (ret)
1511                         goto error;
1512         }
1513
1514 error:
1515         err = btrfs_end_transaction(trans, root);
1516         if (!ret)
1517                 ret = err;
1518
1519         if (ret && cur_offset < end)
1520                 extent_clear_unlock_delalloc(inode, cur_offset, end,
1521                                              locked_page, EXTENT_LOCKED |
1522                                              EXTENT_DELALLOC | EXTENT_DEFRAG |
1523                                              EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
1524                                              PAGE_CLEAR_DIRTY |
1525                                              PAGE_SET_WRITEBACK |
1526                                              PAGE_END_WRITEBACK);
1527         btrfs_free_path(path);
1528         return ret;
1529 }
1530
1531 static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
1532 {
1533
1534         if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
1535             !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC))
1536                 return 0;
1537
1538         /*
1539          * @defrag_bytes is a hint value, read with no spinlock held:
1540          * if it is not zero, it means the file is being defragged.
1541          * Force COW if the given extent needs to be defragged.
1542          */
1543         if (BTRFS_I(inode)->defrag_bytes &&
1544             test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
1545                            EXTENT_DEFRAG, 0, NULL))
1546                 return 1;
1547
1548         return 0;
1549 }
1550
1551 /*
1552  * extent_io.c callback to do delayed allocation processing
1553  */
1554 static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1555                               u64 start, u64 end, int *page_started,
1556                               unsigned long *nr_written)
1557 {
1558         int ret;
1559         int force_cow = need_force_cow(inode, start, end);
1560
1561         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) {
1562                 ret = run_delalloc_nocow(inode, locked_page, start, end,
1563                                          page_started, 1, nr_written);
1564         } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
1565                 ret = run_delalloc_nocow(inode, locked_page, start, end,
1566                                          page_started, 0, nr_written);
1567         } else if (!inode_need_compress(inode)) {
1568                 ret = cow_file_range(inode, locked_page, start, end, end,
1569                                       page_started, nr_written, 1, NULL);
1570         } else {
1571                 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1572                         &BTRFS_I(inode)->runtime_flags);
1573                 ret = cow_file_range_async(inode, locked_page, start, end,
1574                                            page_started, nr_written);
1575         }
1576         return ret;
1577 }
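
/*
 * Illustrative sketch, not part of the original file: the dispatch in
 * run_delalloc_range() above reduces to the decision table below.  The
 * enum and helper are hypothetical names used only for illustration.
 */
enum delalloc_mode {
        DELALLOC_NOCOW,         /* run_delalloc_nocow(force = 1) */
        DELALLOC_PREALLOC,      /* run_delalloc_nocow(force = 0) */
        DELALLOC_COW,           /* cow_file_range() */
        DELALLOC_COMPRESS,      /* cow_file_range_async() */
};

static inline enum delalloc_mode pick_delalloc_mode(unsigned int flags,
                                                    int force_cow,
                                                    int need_compress)
{
        if ((flags & BTRFS_INODE_NODATACOW) && !force_cow)
                return DELALLOC_NOCOW;
        if ((flags & BTRFS_INODE_PREALLOC) && !force_cow)
                return DELALLOC_PREALLOC;
        if (!need_compress)
                return DELALLOC_COW;
        return DELALLOC_COMPRESS;
}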
1578
1579 static void btrfs_split_extent_hook(struct inode *inode,
1580                                     struct extent_state *orig, u64 split)
1581 {
1582         u64 size;
1583
1584         /* not delalloc, ignore it */
1585         if (!(orig->state & EXTENT_DELALLOC))
1586                 return;
1587
1588         size = orig->end - orig->start + 1;
1589         if (size > BTRFS_MAX_EXTENT_SIZE) {
1590                 u64 num_extents;
1591                 u64 new_size;
1592
1593                 /*
1594                  * See the explanation in btrfs_merge_extent_hook, the same
1595                  * applies here, just in reverse.
1596                  */
1597                 new_size = orig->end - split + 1;
1598                 num_extents = div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
1599                                         BTRFS_MAX_EXTENT_SIZE);
1600                 new_size = split - orig->start;
1601                 num_extents += div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
1602                                         BTRFS_MAX_EXTENT_SIZE);
1603                 if (div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1,
1604                               BTRFS_MAX_EXTENT_SIZE) >= num_extents)
1605                         return;
1606         }
1607
1608         spin_lock(&BTRFS_I(inode)->lock);
1609         BTRFS_I(inode)->outstanding_extents++;
1610         spin_unlock(&BTRFS_I(inode)->lock);
1611 }
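
/*
 * Worked example of the split accounting above, assuming
 * BTRFS_MAX_EXTENT_SIZE is 128M:
 *
 *   - splitting a 128M delalloc extent in two: the whole needed 1
 *     outstanding extent, the pieces need 1 + 1, so we take the
 *     increment at the end of the hook;
 *   - splitting a (128M + 4k) extent at the 4k mark: the whole needed
 *     2, the pieces need 1 + 1, so the early return fires and nothing
 *     changes.
 */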
1612
1613 /*
1614  * extent_io.c merge_extent_hook, used to track merged delayed allocation
1615  * extents.  It lets us notice when new extents are merged onto old ones,
1616  * as happens during sequential writes, so we can properly account for
1617  * the metadata space we'll need.
1618  */
1619 static void btrfs_merge_extent_hook(struct inode *inode,
1620                                     struct extent_state *new,
1621                                     struct extent_state *other)
1622 {
1623         u64 new_size, old_size;
1624         u64 num_extents;
1625
1626         /* not delalloc, ignore it */
1627         if (!(other->state & EXTENT_DELALLOC))
1628                 return;
1629
1630         if (new->start > other->start)
1631                 new_size = new->end - other->start + 1;
1632         else
1633                 new_size = other->end - new->start + 1;
1634
1635         /* we're not bigger than the max, unreserve the space and go */
1636         if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
1637                 spin_lock(&BTRFS_I(inode)->lock);
1638                 BTRFS_I(inode)->outstanding_extents--;
1639                 spin_unlock(&BTRFS_I(inode)->lock);
1640                 return;
1641         }
1642
1643         /*
1644          * We have to add up either side to figure out how many extents were
1645          * accounted for before we merged into one big extent.  If the number of
1646          * extents we accounted for is <= the amount we need for the new range
1647          * then we can return, otherwise drop.  Think of it like this
1648          *
1649          * [ 4k][MAX_SIZE]
1650          *
1651          * So we've grown the extent by a MAX_SIZE extent, this would mean we
1652          * need 2 outstanding extents, on one side we have 1 and the other side
1653          * we have 1 so they are == and we can return.  But in this case
1654          *
1655          * [MAX_SIZE+4k][MAX_SIZE+4k]
1656          *
1657          * Each range on their own accounts for 2 extents, but merged together
1658          * they are only 3 extents worth of accounting, so we need to drop in
1659          * this case.
1660          */
1661         old_size = other->end - other->start + 1;
1662         num_extents = div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1,
1663                                 BTRFS_MAX_EXTENT_SIZE);
1664         old_size = new->end - new->start + 1;
1665         num_extents += div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1,
1666                                  BTRFS_MAX_EXTENT_SIZE);
1667
1668         if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
1669                       BTRFS_MAX_EXTENT_SIZE) >= num_extents)
1670                 return;
1671
1672         spin_lock(&BTRFS_I(inode)->lock);
1673         BTRFS_I(inode)->outstanding_extents--;
1674         spin_unlock(&BTRFS_I(inode)->lock);
1675 }
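
/*
 * Illustrative helper, an editor's sketch rather than anything the code
 * above calls: the number of outstanding extents a delalloc range of
 * @len bytes accounts for is a simple round-up division, mirroring the
 * div64_u64() calls in the split and merge hooks.
 */
static inline u64 sketch_outstanding_extents(u64 len)
{
        return div64_u64(len + BTRFS_MAX_EXTENT_SIZE - 1,
                         BTRFS_MAX_EXTENT_SIZE);
}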
1676
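/*
 * Track inodes with pending delalloc: each root keeps a list of such
 * inodes, and a root is put on fs_info->delalloc_roots while its list
 * is non-empty.  btrfs_del_delalloc_inode() below undoes both steps.
 */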
1677 static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
1678                                       struct inode *inode)
1679 {
1680         spin_lock(&root->delalloc_lock);
1681         if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1682                 list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1683                               &root->delalloc_inodes);
1684                 set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1685                         &BTRFS_I(inode)->runtime_flags);
1686                 root->nr_delalloc_inodes++;
1687                 if (root->nr_delalloc_inodes == 1) {
1688                         spin_lock(&root->fs_info->delalloc_root_lock);
1689                         BUG_ON(!list_empty(&root->delalloc_root));
1690                         list_add_tail(&root->delalloc_root,
1691                                       &root->fs_info->delalloc_roots);
1692                         spin_unlock(&root->fs_info->delalloc_root_lock);
1693                 }
1694         }
1695         spin_unlock(&root->delalloc_lock);
1696 }
1697
1698 static void btrfs_del_delalloc_inode(struct btrfs_root *root,
1699                                      struct inode *inode)
1700 {
1701         spin_lock(&root->delalloc_lock);
1702         if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1703                 list_del_init(&BTRFS_I(inode)->delalloc_inodes);
1704                 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1705                           &BTRFS_I(inode)->runtime_flags);
1706                 root->nr_delalloc_inodes--;
1707                 if (!root->nr_delalloc_inodes) {
1708                         spin_lock(&root->fs_info->delalloc_root_lock);
1709                         BUG_ON(list_empty(&root->delalloc_root));
1710                         list_del_init(&root->delalloc_root);
1711                         spin_unlock(&root->fs_info->delalloc_root_lock);
1712                 }
1713         }
1714         spin_unlock(&root->delalloc_lock);
1715 }
1716
1717 /*
1718  * extent_io.c set_bit_hook, used to track delayed allocation
1719  * bytes in this file, and to maintain the list of inodes that
1720  * have pending delalloc work to be done.
1721  */
1722 static void btrfs_set_bit_hook(struct inode *inode,
1723                                struct extent_state *state, unsigned *bits)
1724 {
1725
1726         if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
1727                 WARN_ON(1);
1728         /*
1729          * set_bit and clear_bit hooks normally require _irqsave/restore,
1730          * but in this case, we are only testing for the DELALLOC
1731          * bit, which is only set or cleared with irqs on
1732          */
1733         if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1734                 struct btrfs_root *root = BTRFS_I(inode)->root;
1735                 u64 len = state->end + 1 - state->start;
1736                 bool do_list = !btrfs_is_free_space_inode(inode);
1737
1738                 if (*bits & EXTENT_FIRST_DELALLOC) {
1739                         *bits &= ~EXTENT_FIRST_DELALLOC;
1740                 } else {
1741                         spin_lock(&BTRFS_I(inode)->lock);
1742                         BTRFS_I(inode)->outstanding_extents++;
1743                         spin_unlock(&BTRFS_I(inode)->lock);
1744                 }
1745
1746                 /* For sanity tests */
1747                 if (btrfs_test_is_dummy_root(root))
1748                         return;
1749
1750                 __percpu_counter_add(&root->fs_info->delalloc_bytes, len,
1751                                      root->fs_info->delalloc_batch);
1752                 spin_lock(&BTRFS_I(inode)->lock);
1753                 BTRFS_I(inode)->delalloc_bytes += len;
1754                 if (*bits & EXTENT_DEFRAG)
1755                         BTRFS_I(inode)->defrag_bytes += len;
1756                 if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1757                                          &BTRFS_I(inode)->runtime_flags))
1758                         btrfs_add_delalloc_inodes(root, inode);
1759                 spin_unlock(&BTRFS_I(inode)->lock);
1760         }
1761 }
1762
1763 /*
1764  * extent_io.c clear_bit_hook, see set_bit_hook for why
1765  */
1766 static void btrfs_clear_bit_hook(struct inode *inode,
1767                                  struct extent_state *state,
1768                                  unsigned *bits)
1769 {
1770         u64 len = state->end + 1 - state->start;
1771         u64 num_extents = div64_u64(len + BTRFS_MAX_EXTENT_SIZE - 1,
1772                                     BTRFS_MAX_EXTENT_SIZE);
1773
1774         spin_lock(&BTRFS_I(inode)->lock);
1775         if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG))
1776                 BTRFS_I(inode)->defrag_bytes -= len;
1777         spin_unlock(&BTRFS_I(inode)->lock);
1778
1779         /*
1780          * set_bit and clear_bit hooks normally require _irqsave/restore,
1781          * but in this case, we are only testing for the DELALLOC
1782          * bit, which is only set or cleared with irqs on
1783          */
1784         if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1785                 struct btrfs_root *root = BTRFS_I(inode)->root;
1786                 bool do_list = !btrfs_is_free_space_inode(inode);
1787
1788                 if (*bits & EXTENT_FIRST_DELALLOC) {
1789                         *bits &= ~EXTENT_FIRST_DELALLOC;
1790                 } else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
1791                         spin_lock(&BTRFS_I(inode)->lock);
1792                         BTRFS_I(inode)->outstanding_extents -= num_extents;
1793                         spin_unlock(&BTRFS_I(inode)->lock);
1794                 }
1795
1796                 /*
1797                  * We don't reserve metadata space for space cache inodes so we
1798                  * don't need to call btrfs_delalloc_release_metadata if there is an
1799                  * error.
1800                  */
1801                 if (*bits & EXTENT_DO_ACCOUNTING &&
1802                     root != root->fs_info->tree_root)
1803                         btrfs_delalloc_release_metadata(inode, len);
1804
1805                 /* For sanity tests. */
1806                 if (btrfs_test_is_dummy_root(root))
1807                         return;
1808
1809                 if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
1810                     && do_list && !(state->state & EXTENT_NORESERVE))
1811                         btrfs_free_reserved_data_space_noquota(inode,
1812                                         state->start, len);
1813
1814                 __percpu_counter_add(&root->fs_info->delalloc_bytes, -len,
1815                                      root->fs_info->delalloc_batch);
1816                 spin_lock(&BTRFS_I(inode)->lock);
1817                 BTRFS_I(inode)->delalloc_bytes -= len;
1818                 if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
1819                     test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1820                              &BTRFS_I(inode)->runtime_flags))
1821                         btrfs_del_delalloc_inode(root, inode);
1822                 spin_unlock(&BTRFS_I(inode)->lock);
1823         }
1824 }
1825
1826 /*
1827  * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
1828  * we don't create bios that span stripes or chunks
1829  *
1830  * return 1 if page cannot be merged to bio
1831  * return 0 if page can be merged to bio
1832  * return error otherwise
1833  */
1834 int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
1835                          size_t size, struct bio *bio,
1836                          unsigned long bio_flags)
1837 {
1838         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1839         u64 logical = (u64)bio->bi_iter.bi_sector << 9;
1840         u64 length = 0;
1841         u64 map_length;
1842         int ret;
1843
1844         if (bio_flags & EXTENT_BIO_COMPRESSED)
1845                 return 0;
1846
1847         length = bio->bi_iter.bi_size;
1848         map_length = length;
1849         ret = btrfs_map_block(root->fs_info, rw, logical,
1850                               &map_length, NULL, 0);
1851         if (ret < 0)
1852                 return ret;
1853         if (map_length < length + size)
1854                 return 1;
1855         return 0;
1856 }
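
/*
 * Illustrative call pattern, an editor's sketch rather than code from
 * this file: a bio builder would consult the hook before growing a bio
 * with another page, treating errors as "cannot merge".
 */
static inline bool sketch_can_add_page(int rw, struct page *page,
                                       unsigned long offset, struct bio *bio,
                                       unsigned long bio_flags)
{
        /* 0 from the hook means the page may be merged into @bio */
        return btrfs_merge_bio_hook(rw, page, offset, PAGE_SIZE, bio,
                                    bio_flags) == 0;
}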
1857
1858 /*
1859  * in order to insert checksums into the metadata in large chunks,
1860  * we wait until bio submission time.  All the pages in the bio are
1861  * checksummed and sums are attached onto the ordered extent record.
1862  *
1863  * At IO completion time the csums attached to the ordered extent record
1864  * are inserted into the btree.
1865  */
1866 static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1867                                     struct bio *bio, int mirror_num,
1868                                     unsigned long bio_flags,
1869                                     u64 bio_offset)
1870 {
1871         struct btrfs_root *root = BTRFS_I(inode)->root;
1872         int ret = 0;
1873
1874         ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1875         BUG_ON(ret); /* -ENOMEM */
1876         return 0;
1877 }
1878
1879 /*
1880  * The "done" half of the async write submission: by the time this runs,
1881  * __btrfs_submit_bio_start has already checksummed all the pages in the
1882  * bio and attached the sums to the ordered extent record.
1883  *
1884  * All that is left here is to map the bio to the device and submit it,
1885  * propagating any mapping failure back through the bio.
1886  */
1887 static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1888                           int mirror_num, unsigned long bio_flags,
1889                           u64 bio_offset)
1890 {
1891         struct btrfs_root *root = BTRFS_I(inode)->root;
1892         int ret;
1893
1894         ret = btrfs_map_bio(root, rw, bio, mirror_num, 1);
1895         if (ret) {
1896                 bio->bi_error = ret;
1897                 bio_endio(bio);
1898         }
1899         return ret;
1900 }
1901
1902 /*
1903  * extent_io.c submission hook. This does the right thing for csum calculation
1904  * on write, or reading the csums from the tree before a read
1905  */
1906 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1907                           int mirror_num, unsigned long bio_flags,
1908                           u64 bio_offset)
1909 {
1910         struct btrfs_root *root = BTRFS_I(inode)->root;
1911         enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
1912         int ret = 0;
1913         int skip_sum;
1914         int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
1915
1916         skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1917
1918         if (btrfs_is_free_space_inode(inode))
1919                 metadata = BTRFS_WQ_ENDIO_FREE_SPACE;
1920
1921         if (!(rw & REQ_WRITE)) {
1922                 ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
1923                 if (ret)
1924                         goto out;
1925
1926                 if (bio_flags & EXTENT_BIO_COMPRESSED) {
1927                         ret = btrfs_submit_compressed_read(inode, bio,
1928                                                            mirror_num,
1929                                                            bio_flags);
1930                         goto out;
1931                 } else if (!skip_sum) {
1932                         ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
1933                         if (ret)
1934                                 goto out;
1935                 }
1936                 goto mapit;
1937         } else if (async && !skip_sum) {
1938                 /* csum items have already been cloned */
1939                 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1940                         goto mapit;
1941                 /* we're doing a write, do the async checksumming */
1942                 ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
1943                                    inode, rw, bio, mirror_num,
1944                                    bio_flags, bio_offset,
1945                                    __btrfs_submit_bio_start,
1946                                    __btrfs_submit_bio_done);
1947                 goto out;
1948         } else if (!skip_sum) {
1949                 ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1950                 if (ret)
1951                         goto out;
1952         }
1953
1954 mapit:
1955         ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
1956
1957 out:
1958         if (ret < 0) {
1959                 bio->bi_error = ret;
1960                 bio_endio(bio);
1961         }
1962         return ret;
1963 }
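
/*
 * To summarize the hook above: reads get an end_io workqueue and, unless
 * the inode skips data csums, either take the compressed read path or
 * have their csums looked up before being mapped.  Writes go straight to
 * mapping when the csums were already cloned (data reloc tree), are
 * checksummed synchronously when async is not wanted, and otherwise are
 * handed to the async helpers, which checksum and then map them.
 */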
1964
1965 /*
1966  * given a list of ordered sums, record them in the inode.  This happens
1967  * at IO completion time based on sums calculated at bio submission time.
1968  */
1969 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1970                              struct inode *inode, u64 file_offset,
1971                              struct list_head *list)
1972 {
1973         struct btrfs_ordered_sum *sum;
1974
1975         list_for_each_entry(sum, list, list) {
1976                 trans->adding_csums = 1;
1977                 btrfs_csum_file_blocks(trans,
1978                        BTRFS_I(inode)->root->fs_info->csum_root, sum);
1979                 trans->adding_csums = 0;
1980         }
1981         return 0;
1982 }
1983
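/*
 * @end is inclusive, so a properly rounded range ends one byte short of
 * a page boundary; the WARN_ON below catches callers that pass in an
 * exclusive, page-aligned end by mistake.
 */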
1984 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
1985                               struct extent_state **cached_state)
1986 {
1987         WARN_ON((end & (PAGE_SIZE - 1)) == 0);
1988         return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1989                                    cached_state);
1990 }
1991
1992 /* see btrfs_writepage_start_hook for details on why this is required */
1993 struct btrfs_writepage_fixup {
1994         struct page *page;
1995         struct btrfs_work work;
1996 };
1997
1998 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1999 {
2000         struct btrfs_writepage_fixup *fixup;
2001         struct btrfs_ordered_extent *ordered;
2002         struct extent_state *cached_state = NULL;
2003         struct page *page;
2004         struct inode *inode;
2005         u64 page_start;
2006         u64 page_end;
2007         int ret;
2008
2009         fixup = container_of(work, struct btrfs_writepage_fixup, work);
2010         page = fixup->page;
2011 again:
2012         lock_page(page);
2013         if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
2014                 ClearPageChecked(page);
2015                 goto out_page;
2016         }
2017
2018         inode = page->mapping->host;
2019         page_start = page_offset(page);
2020         page_end = page_offset(page) + PAGE_SIZE - 1;
2021
2022         lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
2023                          &cached_state);
2024
2025         /* already ordered? We're done */
2026         if (PagePrivate2(page))
2027                 goto out;
2028
2029         ordered = btrfs_lookup_ordered_range(inode, page_start,
2030                                         PAGE_SIZE);
2031         if (ordered) {
2032                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
2033                                      page_end, &cached_state, GFP_NOFS);
2034                 unlock_page(page);
2035                 btrfs_start_ordered_extent(inode, ordered, 1);
2036                 btrfs_put_ordered_extent(ordered);
2037                 goto again;
2038         }
2039
2040         ret = btrfs_delalloc_reserve_space(inode, page_start,
2041                                            PAGE_SIZE);
2042         if (ret) {
2043                 mapping_set_error(page->mapping, ret);
2044                 end_extent_writepage(page, ret, page_start, page_end);
2045                 ClearPageChecked(page);
2046                 goto out;
2047         }
2048
2049         btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
2050         ClearPageChecked(page);
2051         set_page_dirty(page);
2052 out:
2053         unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
2054                              &cached_state, GFP_NOFS);
2055 out_page:
2056         unlock_page(page);
2057         put_page(page);
2058         kfree(fixup);
2059 }
2060
2061 /*
2062  * There are a few paths in the higher layers of the kernel that directly
2063  * set the page dirty bit without asking the filesystem if it is a
2064  * good idea.  This causes problems because we want to make sure COW
2065  * properly happens and the data=ordered rules are followed.
2066  *
2067  * In our case any range that doesn't have the ORDERED bit set
2068  * hasn't been properly setup for IO.  We kick off an async process
2069  * to fix it up.  The async helper will wait for ordered extents, set
2070  * the delalloc bit and make it safe to write the page.
2071  */
2072 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
2073 {
2074         struct inode *inode = page->mapping->host;
2075         struct btrfs_writepage_fixup *fixup;
2076         struct btrfs_root *root = BTRFS_I(inode)->root;
2077
2078         /* this page is properly in the ordered list */
2079         if (TestClearPagePrivate2(page))
2080                 return 0;
2081
2082         if (PageChecked(page))
2083                 return -EAGAIN;
2084
2085         fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
2086         if (!fixup)
2087                 return -EAGAIN;
2088
2089         SetPageChecked(page);
2090         get_page(page);
2091         btrfs_init_work(&fixup->work, btrfs_fixup_helper,
2092                         btrfs_writepage_fixup_worker, NULL, NULL);
2093         fixup->page = page;
2094         btrfs_queue_work(root->fs_info->fixup_workers, &fixup->work);
2095         return -EBUSY;
2096 }
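
/*
 * Return semantics above: 0 means the page was already properly set up
 * for ordered IO, -EAGAIN tells the caller a fixup is already pending
 * (or could not be allocated), and -EBUSY means a fixup worker has been
 * queued and now holds a reference on the page.
 */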
2097
2098 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
2099                                        struct inode *inode, u64 file_pos,
2100                                        u64 disk_bytenr, u64 disk_num_bytes,
2101                                        u64 num_bytes, u64 ram_bytes,
2102                                        u8 compression, u8 encryption,
2103                                        u16 other_encoding, int extent_type)
2104 {
2105         struct btrfs_root *root = BTRFS_I(inode)->root;
2106         struct btrfs_file_extent_item *fi;
2107         struct btrfs_path *path;
2108         struct extent_buffer *leaf;
2109         struct btrfs_key ins;
2110         int extent_inserted = 0;
2111         int ret;
2112
2113         path = btrfs_alloc_path();
2114         if (!path)
2115                 return -ENOMEM;
2116
2117         /*
2118          * we may be replacing one extent in the tree with another.
2119          * The new extent is pinned in the extent map, and we don't want
2120          * to drop it from the cache until it is completely in the btree.
2121          *
2122          * So, tell btrfs_drop_extents to leave this extent in the cache.
2123          * The caller is expected to unpin it and allow it to be merged
2124          * with the others.
2125          */
2126         ret = __btrfs_drop_extents(trans, root, inode, path, file_pos,
2127                                    file_pos + num_bytes, NULL, 0,
2128                                    1, sizeof(*fi), &extent_inserted);
2129         if (ret)
2130                 goto out;
2131
2132         if (!extent_inserted) {
2133                 ins.objectid = btrfs_ino(inode);
2134                 ins.offset = file_pos;
2135                 ins.type = BTRFS_EXTENT_DATA_KEY;
2136
2137                 path->leave_spinning = 1;
2138                 ret = btrfs_insert_empty_item(trans, root, path, &ins,
2139                                               sizeof(*fi));
2140                 if (ret)
2141                         goto out;
2142         }
2143         leaf = path->nodes[0];
2144         fi = btrfs_item_ptr(leaf, path->slots[0],
2145                             struct btrfs_file_extent_item);
2146         btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2147         btrfs_set_file_extent_type(leaf, fi, extent_type);
2148         btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
2149         btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
2150         btrfs_set_file_extent_offset(leaf, fi, 0);
2151         btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2152         btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
2153         btrfs_set_file_extent_compression(leaf, fi, compression);
2154         btrfs_set_file_extent_encryption(leaf, fi, encryption);
2155         btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
2156
2157         btrfs_mark_buffer_dirty(leaf);
2158         btrfs_release_path(path);
2159
2160         inode_add_bytes(inode, num_bytes);
2161
2162         ins.objectid = disk_bytenr;
2163         ins.offset = disk_num_bytes;
2164         ins.type = BTRFS_EXTENT_ITEM_KEY;
2165         ret = btrfs_alloc_reserved_file_extent(trans, root,
2166                                         root->root_key.objectid,
2167                                         btrfs_ino(inode), file_pos,
2168                                         ram_bytes, &ins);
2169         /*
2170          * Release the reserved range from inode dirty range map, as it is
2171          * already moved into delayed_ref_head
2172          */
2173         btrfs_qgroup_release_data(inode, file_pos, ram_bytes);
2174 out:
2175         btrfs_free_path(path);
2176
2177         return ret;
2178 }
2179
2180 /* snapshot-aware defrag */
2181 struct sa_defrag_extent_backref {
2182         struct rb_node node;
2183         struct old_sa_defrag_extent *old;
2184         u64 root_id;
2185         u64 inum;
2186         u64 file_pos;
2187         u64 extent_offset;
2188         u64 num_bytes;
2189         u64 generation;
2190 };
2191
2192 struct old_sa_defrag_extent {
2193         struct list_head list;
2194         struct new_sa_defrag_extent *new;
2195
2196         u64 extent_offset;
2197         u64 bytenr;
2198         u64 offset;
2199         u64 len;
2200         int count;
2201 };
2202
2203 struct new_sa_defrag_extent {
2204         struct rb_root root;
2205         struct list_head head;
2206         struct btrfs_path *path;
2207         struct inode *inode;
2208         u64 file_pos;
2209         u64 len;
2210         u64 bytenr;
2211         u64 disk_len;
2212         u8 compress_type;
2213 };
2214
2215 static int backref_comp(struct sa_defrag_extent_backref *b1,
2216                         struct sa_defrag_extent_backref *b2)
2217 {
2218         if (b1->root_id < b2->root_id)
2219                 return -1;
2220         else if (b1->root_id > b2->root_id)
2221                 return 1;
2222
2223         if (b1->inum < b2->inum)
2224                 return -1;
2225         else if (b1->inum > b2->inum)
2226                 return 1;
2227
2228         if (b1->file_pos < b2->file_pos)
2229                 return -1;
2230         else if (b1->file_pos > b2->file_pos)
2231                 return 1;
2232
2233         /*
2234          * [------------------------------] ===> (a range of space)
2235          *     |<--->|   |<---->| =============> (fs/file tree A)
2236          * |<---------------------------->| ===> (fs/file tree B)
2237          *
2238          * A range of space can refer to two file extents in one tree while
2239          * referring to only one file extent in another tree.
2240          *
2241          * So we may process a disk offset more than once (two extents in A)
2242          * and land on the same extent (one extent in B), then insert two
2243          * identical backrefs (both referring to the extent in B).
2244          */
2245         return 0;
2246 }
2247
2248 static void backref_insert(struct rb_root *root,
2249                            struct sa_defrag_extent_backref *backref)
2250 {
2251         struct rb_node **p = &root->rb_node;
2252         struct rb_node *parent = NULL;
2253         struct sa_defrag_extent_backref *entry;
2254         int ret;
2255
2256         while (*p) {
2257                 parent = *p;
2258                 entry = rb_entry(parent, struct sa_defrag_extent_backref, node);
2259
2260                 ret = backref_comp(backref, entry);
2261                 if (ret < 0)
2262                         p = &(*p)->rb_left;
2263                 else
2264                         p = &(*p)->rb_right;
2265         }
2266
2267         rb_link_node(&backref->node, parent, p);
2268         rb_insert_color(&backref->node, root);
2269 }
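
/*
 * backref_comp() can legitimately return 0 for two distinct backrefs
 * (see the diagram above it); equal keys fall through to the right-hand
 * branch, so duplicates are kept in the tree rather than rejected.
 */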
2270
2271 /*
2272  * Note the backref might have changed, in which case we just return 0.
2273  */
2274 static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
2275                                        void *ctx)
2276 {
2277         struct btrfs_file_extent_item *extent;
2278         struct btrfs_fs_info *fs_info;
2279         struct old_sa_defrag_extent *old = ctx;
2280         struct new_sa_defrag_extent *new = old->new;
2281         struct btrfs_path *path = new->path;
2282         struct btrfs_key key;
2283         struct btrfs_root *root;
2284         struct sa_defrag_extent_backref *backref;
2285         struct extent_buffer *leaf;
2286         struct inode *inode = new->inode;
2287         int slot;
2288         int ret;
2289         u64 extent_offset;
2290         u64 num_bytes;
2291
2292         if (BTRFS_I(inode)->root->root_key.objectid == root_id &&
2293             inum == btrfs_ino(inode))
2294                 return 0;
2295
2296         key.objectid = root_id;
2297         key.type = BTRFS_ROOT_ITEM_KEY;
2298         key.offset = (u64)-1;
2299
2300         fs_info = BTRFS_I(inode)->root->fs_info;
2301         root = btrfs_read_fs_root_no_name(fs_info, &key);
2302         if (IS_ERR(root)) {
2303                 if (PTR_ERR(root) == -ENOENT)
2304                         return 0;
2305                 WARN_ON(1);
2306                 pr_debug("inum=%llu, offset=%llu, root_id=%llu\n",
2307                          inum, offset, root_id);
2308                 return PTR_ERR(root);
2309         }
2310
2311         key.objectid = inum;
2312         key.type = BTRFS_EXTENT_DATA_KEY;
2313         if (offset > (u64)-1 << 32)
2314                 key.offset = 0;
2315         else
2316                 key.offset = offset;
2317
2318         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2319         if (WARN_ON(ret < 0))
2320                 return ret;
2321         ret = 0;
2322
2323         while (1) {
2324                 cond_resched();
2325
2326                 leaf = path->nodes[0];
2327                 slot = path->slots[0];
2328
2329                 if (slot >= btrfs_header_nritems(leaf)) {
2330                         ret = btrfs_next_leaf(root, path);
2331                         if (ret < 0) {
2332                                 goto out;
2333                         } else if (ret > 0) {
2334                                 ret = 0;
2335                                 goto out;
2336                         }
2337                         continue;
2338                 }
2339
2340                 path->slots[0]++;
2341
2342                 btrfs_item_key_to_cpu(leaf, &key, slot);
2343
2344                 if (key.objectid > inum)
2345                         goto out;
2346
2347                 if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY)
2348                         continue;
2349
2350                 extent = btrfs_item_ptr(leaf, slot,
2351                                         struct btrfs_file_extent_item);
2352
2353                 if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
2354                         continue;
2355
2356                 /*
2357                  * 'offset' refers to the exact key.offset,
2358                  * NOT the 'offset' field in btrfs_extent_data_ref, i.e.
2359                  * (key.offset - extent_offset).
2360                  */
2361                 if (key.offset != offset)
2362                         continue;
2363
2364                 extent_offset = btrfs_file_extent_offset(leaf, extent);
2365                 num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
2366
2367                 if (extent_offset >= old->extent_offset + old->offset +
2368                     old->len || extent_offset + num_bytes <=
2369                     old->extent_offset + old->offset)
2370                         continue;
2371                 break;
2372         }
2373
2374         backref = kmalloc(sizeof(*backref), GFP_NOFS);
2375         if (!backref) {
2376                 ret = -ENOMEM;
2377                 goto out;
2378         }
2379
2380         backref->root_id = root_id;
2381         backref->inum = inum;
2382         backref->file_pos = offset;
2383         backref->num_bytes = num_bytes;
2384         backref->extent_offset = extent_offset;
2385         backref->generation = btrfs_file_extent_generation(leaf, extent);
2386         backref->old = old;
2387         backref_insert(&new->root, backref);
2388         old->count++;
2389 out:
2390         btrfs_release_path(path);
2391         WARN_ON(ret);
2392         return ret;
2393 }
2394
2395 static noinline bool record_extent_backrefs(struct btrfs_path *path,
2396                                    struct new_sa_defrag_extent *new)
2397 {
2398         struct btrfs_fs_info *fs_info = BTRFS_I(new->inode)->root->fs_info;
2399         struct old_sa_defrag_extent *old, *tmp;
2400         int ret;
2401
2402         new->path = path;
2403
2404         list_for_each_entry_safe(old, tmp, &new->head, list) {
2405                 ret = iterate_inodes_from_logical(old->bytenr +
2406                                                   old->extent_offset, fs_info,
2407                                                   path, record_one_backref,
2408                                                   old);
2409                 if (ret < 0 && ret != -ENOENT)
2410                         return false;
2411
2412                 /* no backref to be processed for this extent */
2413                 if (!old->count) {
2414                         list_del(&old->list);
2415                         kfree(old);
2416                 }
2417         }
2418
2419         if (list_empty(&new->head))
2420                 return false;
2421
2422         return true;
2423 }
2424
2425 static int relink_is_mergable(struct extent_buffer *leaf,
2426                               struct btrfs_file_extent_item *fi,
2427                               struct new_sa_defrag_extent *new)
2428 {
2429         if (btrfs_file_extent_disk_bytenr(leaf, fi) != new->bytenr)
2430                 return 0;
2431
2432         if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2433                 return 0;
2434
2435         if (btrfs_file_extent_compression(leaf, fi) != new->compress_type)
2436                 return 0;
2437
2438         if (btrfs_file_extent_encryption(leaf, fi) ||
2439             btrfs_file_extent_other_encoding(leaf, fi))
2440                 return 0;
2441
2442         return 1;
2443 }
2444
2445 /*
2446  * Note the backref might have changed, in which case we just return 0.
2447  */
2448 static noinline int relink_extent_backref(struct btrfs_path *path,
2449                                  struct sa_defrag_extent_backref *prev,
2450                                  struct sa_defrag_extent_backref *backref)
2451 {
2452         struct btrfs_file_extent_item *extent;
2453         struct btrfs_file_extent_item *item;
2454         struct btrfs_ordered_extent *ordered;
2455         struct btrfs_trans_handle *trans;
2456         struct btrfs_fs_info *fs_info;
2457         struct btrfs_root *root;
2458         struct btrfs_key key;
2459         struct extent_buffer *leaf;
2460         struct old_sa_defrag_extent *old = backref->old;
2461         struct new_sa_defrag_extent *new = old->new;
2462         struct inode *src_inode = new->inode;
2463         struct inode *inode;
2464         struct extent_state *cached = NULL;
2465         int ret = 0;
2466         u64 start;
2467         u64 len;
2468         u64 lock_start;
2469         u64 lock_end;
2470         bool merge = false;
2471         int index;
2472
2473         if (prev && prev->root_id == backref->root_id &&
2474             prev->inum == backref->inum &&
2475             prev->file_pos + prev->num_bytes == backref->file_pos)
2476                 merge = true;
2477
2478         /* step 1: get root */
2479         key.objectid = backref->root_id;
2480         key.type = BTRFS_ROOT_ITEM_KEY;
2481         key.offset = (u64)-1;
2482
2483         fs_info = BTRFS_I(src_inode)->root->fs_info;
2484         index = srcu_read_lock(&fs_info->subvol_srcu);
2485
2486         root = btrfs_read_fs_root_no_name(fs_info, &key);
2487         if (IS_ERR(root)) {
2488                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2489                 if (PTR_ERR(root) == -ENOENT)
2490                         return 0;
2491                 return PTR_ERR(root);
2492         }
2493
2494         if (btrfs_root_readonly(root)) {
2495                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2496                 return 0;
2497         }
2498
2499         /* step 2: get inode */
2500         key.objectid = backref->inum;
2501         key.type = BTRFS_INODE_ITEM_KEY;
2502         key.offset = 0;
2503
2504         inode = btrfs_iget(fs_info->sb, &key, root, NULL);
2505         if (IS_ERR(inode)) {
2506                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2507                 return 0;
2508         }
2509
2510         srcu_read_unlock(&fs_info->subvol_srcu, index);
2511
2512         /* step 3: relink backref */
2513         lock_start = backref->file_pos;
2514         lock_end = backref->file_pos + backref->num_bytes - 1;
2515         lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2516                          &cached);
2517
2518         ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
2519         if (ordered) {
2520                 btrfs_put_ordered_extent(ordered);
2521                 goto out_unlock;
2522         }
2523
2524         trans = btrfs_join_transaction(root);
2525         if (IS_ERR(trans)) {
2526                 ret = PTR_ERR(trans);
2527                 goto out_unlock;
2528         }
2529
2530         key.objectid = backref->inum;
2531         key.type = BTRFS_EXTENT_DATA_KEY;
2532         key.offset = backref->file_pos;
2533
2534         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2535         if (ret < 0) {
2536                 goto out_free_path;
2537         } else if (ret > 0) {
2538                 ret = 0;
2539                 goto out_free_path;
2540         }
2541
2542         extent = btrfs_item_ptr(path->nodes[0], path->slots[0],
2543                                 struct btrfs_file_extent_item);
2544
2545         if (btrfs_file_extent_generation(path->nodes[0], extent) !=
2546             backref->generation)
2547                 goto out_free_path;
2548
2549         btrfs_release_path(path);
2550
2551         start = backref->file_pos;
2552         if (backref->extent_offset < old->extent_offset + old->offset)
2553                 start += old->extent_offset + old->offset -
2554                          backref->extent_offset;
2555
2556         len = min(backref->extent_offset + backref->num_bytes,
2557                   old->extent_offset + old->offset + old->len);
2558         len -= max(backref->extent_offset, old->extent_offset + old->offset);
2559
2560         ret = btrfs_drop_extents(trans, root, inode, start,
2561                                  start + len, 1);
2562         if (ret)
2563                 goto out_free_path;
2564 again:
2565         key.objectid = btrfs_ino(inode);
2566         key.type = BTRFS_EXTENT_DATA_KEY;
2567         key.offset = start;
2568
2569         path->leave_spinning = 1;
2570         if (merge) {
2571                 struct btrfs_file_extent_item *fi;
2572                 u64 extent_len;
2573                 struct btrfs_key found_key;
2574
2575                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2576                 if (ret < 0)
2577                         goto out_free_path;
2578
2579                 path->slots[0]--;
2580                 leaf = path->nodes[0];
2581                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2582
2583                 fi = btrfs_item_ptr(leaf, path->slots[0],
2584                                     struct btrfs_file_extent_item);
2585                 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
2586
2587                 if (extent_len + found_key.offset == start &&
2588                     relink_is_mergable(leaf, fi, new)) {
2589                         btrfs_set_file_extent_num_bytes(leaf, fi,
2590                                                         extent_len + len);
2591                         btrfs_mark_buffer_dirty(leaf);
2592                         inode_add_bytes(inode, len);
2593
2594                         ret = 1;
2595                         goto out_free_path;
2596                 } else {
2597                         merge = false;
2598                         btrfs_release_path(path);
2599                         goto again;
2600                 }
2601         }
2602
2603         ret = btrfs_insert_empty_item(trans, root, path, &key,
2604                                         sizeof(*extent));
2605         if (ret) {
2606                 btrfs_abort_transaction(trans, root, ret);
2607                 goto out_free_path;
2608         }
2609
2610         leaf = path->nodes[0];
2611         item = btrfs_item_ptr(leaf, path->slots[0],
2612                                 struct btrfs_file_extent_item);
2613         btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr);
2614         btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len);
2615         btrfs_set_file_extent_offset(leaf, item, start - new->file_pos);
2616         btrfs_set_file_extent_num_bytes(leaf, item, len);
2617         btrfs_set_file_extent_ram_bytes(leaf, item, new->len);
2618         btrfs_set_file_extent_generation(leaf, item, trans->transid);
2619         btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
2620         btrfs_set_file_extent_compression(leaf, item, new->compress_type);
2621         btrfs_set_file_extent_encryption(leaf, item, 0);
2622         btrfs_set_file_extent_other_encoding(leaf, item, 0);
2623
2624         btrfs_mark_buffer_dirty(leaf);
2625         inode_add_bytes(inode, len);
2626         btrfs_release_path(path);
2627
2628         ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
2629                         new->disk_len, 0,
2630                         backref->root_id, backref->inum,
2631                         new->file_pos); /* start - extent_offset */
2632         if (ret) {
2633                 btrfs_abort_transaction(trans, root, ret);
2634                 goto out_free_path;
2635         }
2636
2637         ret = 1;
2638 out_free_path:
2639         btrfs_release_path(path);
2640         path->leave_spinning = 0;
2641         btrfs_end_transaction(trans, root);
2642 out_unlock:
2643         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2644                              &cached, GFP_NOFS);
2645         iput(inode);
2646         return ret;
2647 }
2648
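/*
 * Editor's note with a worked example (not part of the original file):
 * the relinked length computed above is the intersection, in extent
 * coordinates, of the backref's range and the old extent's range:
 *
 *   len = min(b_off + b_bytes, o_off + o_pos + o_len)
 *       - max(b_off, o_off + o_pos)
 *
 * e.g. b_off = 0, b_bytes = 8192, o_off = 0, o_pos = 4096, o_len = 8192
 * gives len = min(8192, 12288) - max(0, 4096) = 4096.  A hypothetical
 * helper with the same arithmetic:
 */
#if 0
static u64 demo_relink_len(u64 b_off, u64 b_bytes,
			   u64 o_off, u64 o_pos, u64 o_len)
{
	return min(b_off + b_bytes, o_off + o_pos + o_len) -
	       max(b_off, o_off + o_pos);
}
#endif
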
2649 static void free_sa_defrag_extent(struct new_sa_defrag_extent *new)
2650 {
2651         struct old_sa_defrag_extent *old, *tmp;
2652
2653         if (!new)
2654                 return;
2655
2656         list_for_each_entry_safe(old, tmp, &new->head, list) {
2657                 kfree(old);
2658         }
2659         kfree(new);
2660 }
2661
2662 static void relink_file_extents(struct new_sa_defrag_extent *new)
2663 {
2664         struct btrfs_path *path;
2665         struct sa_defrag_extent_backref *backref;
2666         struct sa_defrag_extent_backref *prev = NULL;
2667         struct inode *inode;
2668         struct btrfs_root *root;
2669         struct rb_node *node;
2670         int ret;
2671
2672         inode = new->inode;
2673         root = BTRFS_I(inode)->root;
2674
2675         path = btrfs_alloc_path();
2676         if (!path)
2677                 return;
2678
2679         if (!record_extent_backrefs(path, new)) {
2680                 btrfs_free_path(path);
2681                 goto out;
2682         }
2683         btrfs_release_path(path);
2684
2685         while (1) {
2686                 node = rb_first(&new->root);
2687                 if (!node)
2688                         break;
2689                 rb_erase(node, &new->root);
2690
2691                 backref = rb_entry(node, struct sa_defrag_extent_backref, node);
2692
2693                 ret = relink_extent_backref(path, prev, backref);
2694                 WARN_ON(ret < 0);
2695
2696                 kfree(prev);
2697
2698                 if (ret == 1)
2699                         prev = backref;
2700                 else
2701                         prev = NULL;
2702                 cond_resched();
2703         }
2704         kfree(prev);
2705
2706         btrfs_free_path(path);
2707 out:
2708         free_sa_defrag_extent(new);
2709
2710         atomic_dec(&root->fs_info->defrag_running);
2711         wake_up(&root->fs_info->transaction_wait);
2712 }
2713
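/*
 * Editor's sketch (not part of the original file): relink_file_extents()
 * above drains its rbtree by repeatedly taking rb_first() and erasing it,
 * rather than iterating in place, so it can kfree() nodes and
 * cond_resched() as it goes.  A minimal, hypothetical example of the same
 * drain pattern (struct demo_node is invented):
 */
#if 0
struct demo_node {
	struct rb_node node;	/* embedded rbtree linkage */
	u64 key;
};

static void demo_drain(struct rb_root *root)
{
	struct rb_node *node;
	struct demo_node *entry;

	while ((node = rb_first(root))) {
		rb_erase(node, root);	/* detach before consuming */
		entry = rb_entry(node, struct demo_node, node);
		kfree(entry);
		cond_resched();		/* long trees, stay preemption friendly */
	}
}
#endif
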
2714 static struct new_sa_defrag_extent *
2715 record_old_file_extents(struct inode *inode,
2716                         struct btrfs_ordered_extent *ordered)
2717 {
2718         struct btrfs_root *root = BTRFS_I(inode)->root;
2719         struct btrfs_path *path;
2720         struct btrfs_key key;
2721         struct old_sa_defrag_extent *old;
2722         struct new_sa_defrag_extent *new;
2723         int ret;
2724
2725         new = kmalloc(sizeof(*new), GFP_NOFS);
2726         if (!new)
2727                 return NULL;
2728
2729         new->inode = inode;
2730         new->file_pos = ordered->file_offset;
2731         new->len = ordered->len;
2732         new->bytenr = ordered->start;
2733         new->disk_len = ordered->disk_len;
2734         new->compress_type = ordered->compress_type;
2735         new->root = RB_ROOT;
2736         INIT_LIST_HEAD(&new->head);
2737
2738         path = btrfs_alloc_path();
2739         if (!path)
2740                 goto out_kfree;
2741
2742         key.objectid = btrfs_ino(inode);
2743         key.type = BTRFS_EXTENT_DATA_KEY;
2744         key.offset = new->file_pos;
2745
2746         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2747         if (ret < 0)
2748                 goto out_free_path;
2749         if (ret > 0 && path->slots[0] > 0)
2750                 path->slots[0]--;
2751
2752         /* find out all the old extents for the file range */
2753         while (1) {
2754                 struct btrfs_file_extent_item *extent;
2755                 struct extent_buffer *l;
2756                 int slot;
2757                 u64 num_bytes;
2758                 u64 offset;
2759                 u64 end;
2760                 u64 disk_bytenr;
2761                 u64 extent_offset;
2762
2763                 l = path->nodes[0];
2764                 slot = path->slots[0];
2765
2766                 if (slot >= btrfs_header_nritems(l)) {
2767                         ret = btrfs_next_leaf(root, path);
2768                         if (ret < 0)
2769                                 goto out_free_path;
2770                         else if (ret > 0)
2771                                 break;
2772                         continue;
2773                 }
2774
2775                 btrfs_item_key_to_cpu(l, &key, slot);
2776
2777                 if (key.objectid != btrfs_ino(inode))
2778                         break;
2779                 if (key.type != BTRFS_EXTENT_DATA_KEY)
2780                         break;
2781                 if (key.offset >= new->file_pos + new->len)
2782                         break;
2783
2784                 extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item);
2785
2786                 num_bytes = btrfs_file_extent_num_bytes(l, extent);
2787                 if (key.offset + num_bytes < new->file_pos)
2788                         goto next;
2789
2790                 disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent);
2791                 if (!disk_bytenr)
2792                         goto next;
2793
2794                 extent_offset = btrfs_file_extent_offset(l, extent);
2795
2796                 old = kmalloc(sizeof(*old), GFP_NOFS);
2797                 if (!old)
2798                         goto out_free_path;
2799
2800                 offset = max(new->file_pos, key.offset);
2801                 end = min(new->file_pos + new->len, key.offset + num_bytes);
2802
2803                 old->bytenr = disk_bytenr;
2804                 old->extent_offset = extent_offset;
2805                 old->offset = offset - key.offset;
2806                 old->len = end - offset;
2807                 old->new = new;
2808                 old->count = 0;
2809                 list_add_tail(&old->list, &new->head);
2810 next:
2811                 path->slots[0]++;
2812                 cond_resched();
2813         }
2814
2815         btrfs_free_path(path);
2816         atomic_inc(&root->fs_info->defrag_running);
2817
2818         return new;
2819
2820 out_free_path:
2821         btrfs_free_path(path);
2822 out_kfree:
2823         free_sa_defrag_extent(new);
2824         return NULL;
2825 }
2826
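/*
 * Editor's note with a worked example (not part of the original file):
 * record_old_file_extents() clamps each old extent to the range covered
 * by the new one with max()/min().  With new->file_pos = 8192,
 * new->len = 8192 and an old item at key.offset = 4096 covering
 * num_bytes = 8192, the overlap is offset = max(8192, 4096) = 8192 and
 * end = min(16384, 12288) = 12288, so old->offset = 4096 and
 * old->len = 4096.  Hypothetical helper for the same clamp:
 */
#if 0
static u64 demo_overlap_len(u64 new_pos, u64 new_len,
			    u64 old_pos, u64 old_bytes)
{
	u64 start = max(new_pos, old_pos);
	u64 end = min(new_pos + new_len, old_pos + old_bytes);

	return end > start ? end - start : 0;
}
#endif
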
2827 static void btrfs_release_delalloc_bytes(struct btrfs_root *root,
2828                                          u64 start, u64 len)
2829 {
2830         struct btrfs_block_group_cache *cache;
2831
2832         cache = btrfs_lookup_block_group(root->fs_info, start);
2833         ASSERT(cache);
2834
2835         spin_lock(&cache->lock);
2836         cache->delalloc_bytes -= len;
2837         spin_unlock(&cache->lock);
2838
2839         btrfs_put_block_group(cache);
2840 }
2841
2842 /* as ordered data IO finishes, this gets called so we can finish
2843  * an ordered extent if the range of bytes in the file it covers is
2844  * fully written.
2845  */
2846 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
2847 {
2848         struct inode *inode = ordered_extent->inode;
2849         struct btrfs_root *root = BTRFS_I(inode)->root;
2850         struct btrfs_trans_handle *trans = NULL;
2851         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2852         struct extent_state *cached_state = NULL;
2853         struct new_sa_defrag_extent *new = NULL;
2854         int compress_type = 0;
2855         int ret = 0;
2856         u64 logical_len = ordered_extent->len;
2857         bool nolock;
2858         bool truncated = false;
2859
2860         nolock = btrfs_is_free_space_inode(inode);
2861
2862         if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
2863                 ret = -EIO;
2864                 goto out;
2865         }
2866
2867         btrfs_free_io_failure_record(inode, ordered_extent->file_offset,
2868                                      ordered_extent->file_offset +
2869                                      ordered_extent->len - 1);
2870
2871         if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
2872                 truncated = true;
2873                 logical_len = ordered_extent->truncated_len;
2874                 /* Truncated the entire extent, don't bother adding */
2875                 if (!logical_len)
2876                         goto out;
2877         }
2878
2879         if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
2880                 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
2881
2882                 /*
2883                  * For the mwrite (mmap + memset to write) case, we still
2884                  * reserve space for the NOCOW range.  As NOCOW won't cause
2885                  * a new delayed ref, just free the reserved space.
2886                  */
2887                 btrfs_qgroup_free_data(inode, ordered_extent->file_offset,
2888                                        ordered_extent->len);
2889                 btrfs_ordered_update_i_size(inode, 0, ordered_extent);
2890                 if (nolock)
2891                         trans = btrfs_join_transaction_nolock(root);
2892                 else
2893                         trans = btrfs_join_transaction(root);
2894                 if (IS_ERR(trans)) {
2895                         ret = PTR_ERR(trans);
2896                         trans = NULL;
2897                         goto out;
2898                 }
2899                 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
2900                 ret = btrfs_update_inode_fallback(trans, root, inode);
2901                 if (ret) /* -ENOMEM or corruption */
2902                         btrfs_abort_transaction(trans, root, ret);
2903                 goto out;
2904         }
2905
2906         lock_extent_bits(io_tree, ordered_extent->file_offset,
2907                          ordered_extent->file_offset + ordered_extent->len - 1,
2908                          &cached_state);
2909
2910         ret = test_range_bit(io_tree, ordered_extent->file_offset,
2911                         ordered_extent->file_offset + ordered_extent->len - 1,
2912                         EXTENT_DEFRAG, 1, cached_state);
2913         if (ret) {
2914                 u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
2915                 if (0 && last_snapshot >= BTRFS_I(inode)->generation)
2916                         /* inode is shared; relink disabled ("0 &&") for now */
2917                         new = record_old_file_extents(inode, ordered_extent);
2918
2919                 clear_extent_bit(io_tree, ordered_extent->file_offset,
2920                         ordered_extent->file_offset + ordered_extent->len - 1,
2921                         EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS);
2922         }
2923
2924         if (nolock)
2925                 trans = btrfs_join_transaction_nolock(root);
2926         else
2927                 trans = btrfs_join_transaction(root);
2928         if (IS_ERR(trans)) {
2929                 ret = PTR_ERR(trans);
2930                 trans = NULL;
2931                 goto out_unlock;
2932         }
2933
2934         trans->block_rsv = &root->fs_info->delalloc_block_rsv;
2935
2936         if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
2937                 compress_type = ordered_extent->compress_type;
2938         if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
2939                 BUG_ON(compress_type);
2940                 ret = btrfs_mark_extent_written(trans, inode,
2941                                                 ordered_extent->file_offset,
2942                                                 ordered_extent->file_offset +
2943                                                 logical_len);
2944         } else {
2945                 BUG_ON(root == root->fs_info->tree_root);
2946                 ret = insert_reserved_file_extent(trans, inode,
2947                                                 ordered_extent->file_offset,
2948                                                 ordered_extent->start,
2949                                                 ordered_extent->disk_len,
2950                                                 logical_len, logical_len,
2951                                                 compress_type, 0, 0,
2952                                                 BTRFS_FILE_EXTENT_REG);
2953                 if (!ret)
2954                         btrfs_release_delalloc_bytes(root,
2955                                                      ordered_extent->start,
2956                                                      ordered_extent->disk_len);
2957         }
2958         unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
2959                            ordered_extent->file_offset, ordered_extent->len,
2960                            trans->transid);
2961         if (ret < 0) {
2962                 btrfs_abort_transaction(trans, root, ret);
2963                 goto out_unlock;
2964         }
2965
2966         add_pending_csums(trans, inode, ordered_extent->file_offset,
2967                           &ordered_extent->list);
2968
2969         btrfs_ordered_update_i_size(inode, 0, ordered_extent);
2970         ret = btrfs_update_inode_fallback(trans, root, inode);
2971         if (ret) { /* -ENOMEM or corruption */
2972                 btrfs_abort_transaction(trans, root, ret);
2973                 goto out_unlock;
2974         }
2975         ret = 0;
2976 out_unlock:
2977         unlock_extent_cached(io_tree, ordered_extent->file_offset,
2978                              ordered_extent->file_offset +
2979                              ordered_extent->len - 1, &cached_state, GFP_NOFS);
2980 out:
2981         if (root != root->fs_info->tree_root)
2982                 btrfs_delalloc_release_metadata(inode, ordered_extent->len);
2983         if (trans)
2984                 btrfs_end_transaction(trans, root);
2985
2986         if (ret || truncated) {
2987                 u64 start, end;
2988
2989                 if (truncated)
2990                         start = ordered_extent->file_offset + logical_len;
2991                 else
2992                         start = ordered_extent->file_offset;
2993                 end = ordered_extent->file_offset + ordered_extent->len - 1;
2994                 clear_extent_uptodate(io_tree, start, end, NULL, GFP_NOFS);
2995
2996                 /* Drop the cache for the part of the extent we didn't write. */
2997                 btrfs_drop_extent_cache(inode, start, end, 0);
2998
2999                 /*
3000                  * If the ordered extent had an IOERR or something else went
3001                  * wrong we need to return the space for this ordered extent
3002                  * back to the allocator.  We only free the extent in the
3003                  * truncated case if we didn't write out the extent at all.
3004                  */
3005                 if ((ret || !logical_len) &&
3006                     !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
3007                     !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
3008                         btrfs_free_reserved_extent(root, ordered_extent->start,
3009                                                    ordered_extent->disk_len, 1);
3010         }
3011
3013         /*
3014          * This needs to be done to make sure anybody waiting knows we are done
3015          * updating everything for this ordered extent.
3016          */
3017         btrfs_remove_ordered_extent(inode, ordered_extent);
3018
3019         /* for snapshot-aware defrag */
3020         if (new) {
3021                 if (ret) {
3022                         free_sa_defrag_extent(new);
3023                         atomic_dec(&root->fs_info->defrag_running);
3024                 } else {
3025                         relink_file_extents(new);
3026                 }
3027         }
3028
3029         /* once for us */
3030         btrfs_put_ordered_extent(ordered_extent);
3031         /* once for the tree */
3032         btrfs_put_ordered_extent(ordered_extent);
3033
3034         return ret;
3035 }
3036
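/*
 * Editor's note with a worked example (not part of the original file): in
 * the truncated case above only [file_offset, file_offset + logical_len)
 * reached disk, so the range whose extent state and cache get torn down
 * starts at file_offset + logical_len.  E.g. file_offset = 0,
 * len = 16384, truncated_len = 4096 gives start = 4096, end = 16383.
 * Hypothetical helper (io_tree ranges are inclusive):
 */
#if 0
static void demo_cleanup_range(u64 file_offset, u64 len, u64 logical_len,
			       bool truncated, u64 *start, u64 *end)
{
	*start = truncated ? file_offset + logical_len : file_offset;
	*end = file_offset + len - 1;	/* inclusive end, as in the io_tree */
}
#endif
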
3037 static void finish_ordered_fn(struct btrfs_work *work)
3038 {
3039         struct btrfs_ordered_extent *ordered_extent;
3040         ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
3041         btrfs_finish_ordered_io(ordered_extent);
3042 }
3043
3044 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
3045                                 struct extent_state *state, int uptodate)
3046 {
3047         struct inode *inode = page->mapping->host;
3048         struct btrfs_root *root = BTRFS_I(inode)->root;
3049         struct btrfs_ordered_extent *ordered_extent = NULL;
3050         struct btrfs_workqueue *wq;
3051         btrfs_work_func_t func;
3052
3053         trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
3054
3055         ClearPagePrivate2(page);
3056         if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
3057                                             end - start + 1, uptodate))
3058                 return 0;
3059
3060         if (btrfs_is_free_space_inode(inode)) {
3061                 wq = root->fs_info->endio_freespace_worker;
3062                 func = btrfs_freespace_write_helper;
3063         } else {
3064                 wq = root->fs_info->endio_write_workers;
3065                 func = btrfs_endio_write_helper;
3066         }
3067
3068         btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL,
3069                         NULL);
3070         btrfs_queue_work(wq, &ordered_extent->work);
3071
3072         return 0;
3073 }
3074
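/*
 * Editor's sketch (not part of the original file):
 * btrfs_writepage_end_io_hook() runs in bio end-io context, so it never
 * calls btrfs_finish_ordered_io() directly; it packages the ordered
 * extent as a btrfs_work item and queues it to a workqueue that runs in
 * process context.  A hypothetical mirror of that hand-off, reusing the
 * helper/function pairing seen above:
 */
#if 0
static void demo_defer_completion(struct btrfs_workqueue *wq,
				  struct btrfs_ordered_extent *ordered)
{
	btrfs_init_work(&ordered->work, btrfs_endio_write_helper,
			finish_ordered_fn, NULL, NULL);
	btrfs_queue_work(wq, &ordered->work);
}
#endif
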
3075 static int __readpage_endio_check(struct inode *inode,
3076                                   struct btrfs_io_bio *io_bio,
3077                                   int icsum, struct page *page,
3078                                   int pgoff, u64 start, size_t len)
3079 {
3080         char *kaddr;
3081         u32 csum_expected;
3082         u32 csum = ~(u32)0;
3083
3084         csum_expected = *(((u32 *)io_bio->csum) + icsum);
3085
3086         kaddr = kmap_atomic(page);
3087         csum = btrfs_csum_data(kaddr + pgoff, csum,  len);
3088         btrfs_csum_final(csum, (char *)&csum);
3089         if (csum != csum_expected)
3090                 goto zeroit;
3091
3092         kunmap_atomic(kaddr);
3093         return 0;
3094 zeroit:
3095         btrfs_warn_rl(BTRFS_I(inode)->root->fs_info,
3096                 "csum failed ino %llu off %llu csum %u expected csum %u",
3097                            btrfs_ino(inode), start, csum, csum_expected);
3098         memset(kaddr + pgoff, 1, len);
3099         flush_dcache_page(page);
3100         kunmap_atomic(kaddr);
3101         if (csum_expected == 0)
3102                 return 0;
3103         return -EIO;
3104 }
3105
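/*
 * Editor's sketch (not part of the original file): the data checksum is
 * crc32c seeded with ~0 and folded by btrfs_csum_final().  A hypothetical
 * stand-alone verification over an already-mapped buffer, mirroring
 * __readpage_endio_check() above:
 */
#if 0
static int demo_verify_block(char *buf, size_t len, u32 csum_expected)
{
	u32 csum = ~(u32)0;			/* crc32c seed */

	csum = btrfs_csum_data(buf, csum, len);
	btrfs_csum_final(csum, (char *)&csum);	/* fold into final form */
	return csum == csum_expected ? 0 : -EIO;
}
#endif
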
3106 /*
3107  * when reads are done, we need to check csums to verify the data is correct.
3108  * If there's a match, we allow the bio to finish.  If not, the code in
3109  * extent_io.c will try to find good copies for us.
3110  */
3111 static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
3112                                       u64 phy_offset, struct page *page,
3113                                       u64 start, u64 end, int mirror)
3114 {
3115         size_t offset = start - page_offset(page);
3116         struct inode *inode = page->mapping->host;
3117         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3118         struct btrfs_root *root = BTRFS_I(inode)->root;
3119
3120         if (PageChecked(page)) {
3121                 ClearPageChecked(page);
3122                 return 0;
3123         }
3124
3125         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
3126                 return 0;
3127
3128         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
3129             test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
3130                 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM);
3131                 return 0;
3132         }
3133
3134         phy_offset >>= inode->i_sb->s_blocksize_bits;
3135         return __readpage_endio_check(inode, io_bio, phy_offset, page, offset,
3136                                       start, (size_t)(end - start + 1));
3137 }
3138
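/*
 * Editor's note with a worked example (not part of the original file):
 * io_bio->csum stores one u32 per block, so the hook above turns the byte
 * offset within the bio into a checksum index by shifting down by the
 * block size.  With 4K blocks (s_blocksize_bits == 12), phy_offset = 8192
 * selects index 2, the third checksum.  Hypothetical helper:
 */
#if 0
static u32 demo_expected_csum(struct btrfs_io_bio *io_bio, u64 phy_offset,
			      unsigned int blocksize_bits)
{
	int icsum = phy_offset >> blocksize_bits;

	return *(((u32 *)io_bio->csum) + icsum);
}
#endif
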
3139 void btrfs_add_delayed_iput(struct inode *inode)
3140 {
3141         struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
3142         struct btrfs_inode *binode = BTRFS_I(inode);
3143
3144         if (atomic_add_unless(&inode->i_count, -1, 1))
3145                 return;
3146
3147         spin_lock(&fs_info->delayed_iput_lock);
3148         if (binode->delayed_iput_count == 0) {
3149                 ASSERT(list_empty(&binode->delayed_iput));
3150                 list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs);
3151         } else {
3152                 binode->delayed_iput_count++;
3153         }
3154         spin_unlock(&fs_info->delayed_iput_lock);
3155 }
3156
3157 void btrfs_run_delayed_iputs(struct btrfs_root *root)
3158 {
3159         struct btrfs_fs_info *fs_info = root->fs_info;
3160
3161         spin_lock(&fs_info->delayed_iput_lock);
3162         while (!list_empty(&fs_info->delayed_iputs)) {
3163                 struct btrfs_inode *inode;
3164
3165                 inode = list_first_entry(&fs_info->delayed_iputs,
3166                                 struct btrfs_inode, delayed_iput);
3167                 if (inode->delayed_iput_count) {
3168                         inode->delayed_iput_count--;
3169                         list_move_tail(&inode->delayed_iput,
3170                                         &fs_info->delayed_iputs);
3171                 } else {
3172                         list_del_init(&inode->delayed_iput);
3173                 }
3174                 spin_unlock(&fs_info->delayed_iput_lock);
3175                 iput(&inode->vfs_inode);
3176                 spin_lock(&fs_info->delayed_iput_lock);
3177         }
3178         spin_unlock(&fs_info->delayed_iput_lock);
3179 }
3180
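/*
 * Editor's sketch (not part of the original file): btrfs_add_delayed_iput()
 * only takes the slow path for the *last* reference --
 * atomic_add_unless(&i_count, -1, 1) decrements and returns non-zero
 * unless the count is already 1.  A hypothetical refcount with the same
 * "defer only the final put" shape:
 */
#if 0
static void demo_put(atomic_t *refs, struct list_head *entry,
		     struct list_head *deferred, spinlock_t *lock)
{
	if (atomic_add_unless(refs, -1, 1))
		return;			/* fast path: not the last reference */

	spin_lock(lock);
	list_add_tail(entry, deferred);	/* a worker drops the last ref later */
	spin_unlock(lock);
}
#endif
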
3181 /*
3182  * This is called at transaction commit time. If there are no orphan
3183  * files in the subvolume, it removes the orphan item and frees the
3184  * block_rsv structure.
3185  */
3186 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
3187                               struct btrfs_root *root)
3188 {
3189         struct btrfs_block_rsv *block_rsv;
3190         int ret;
3191
3192         if (atomic_read(&root->orphan_inodes) ||
3193             root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
3194                 return;
3195
3196         spin_lock(&root->orphan_lock);
3197         if (atomic_read(&root->orphan_inodes)) {
3198                 spin_unlock(&root->orphan_lock);
3199                 return;
3200         }
3201
3202         if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) {
3203                 spin_unlock(&root->orphan_lock);
3204                 return;
3205         }
3206
3207         block_rsv = root->orphan_block_rsv;
3208         root->orphan_block_rsv = NULL;
3209         spin_unlock(&root->orphan_lock);
3210
3211         if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state) &&
3212             btrfs_root_refs(&root->root_item) > 0) {
3213                 ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
3214                                             root->root_key.objectid);
3215                 if (ret)
3216                         btrfs_abort_transaction(trans, root, ret);
3217                 else
3218                         clear_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
3219                                   &root->state);
3220         }
3221
3222         if (block_rsv) {
3223                 WARN_ON(block_rsv->size > 0);
3224                 btrfs_free_block_rsv(root, block_rsv);
3225         }
3226 }
3227
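/*
 * Editor's note (not part of the original file): the function above is a
 * textbook check/lock/re-check -- the unlocked tests are only an
 * optimization, and both conditions are re-tested under orphan_lock before
 * the block_rsv is detached.  Hypothetical skeleton of the same pattern:
 */
#if 0
static void *demo_detach_if_idle(spinlock_t *lock, void **slot,
				 atomic_t *busy)
{
	void *obj = NULL;

	if (atomic_read(busy))		/* unlocked fast-path bail-out */
		return NULL;

	spin_lock(lock);
	if (!atomic_read(busy)) {	/* re-check under the lock */
		obj = *slot;
		*slot = NULL;
	}
	spin_unlock(lock);
	return obj;
}
#endif
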
3228 /*
3229  * This creates an orphan entry for the given inode in case something goes
3230  * wrong in the middle of an unlink/truncate.
3231  *
3232  * NOTE: caller of this function should reserve 5 units of metadata for
3233  *       this function.
3234  */
3235 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
3236 {
3237         struct btrfs_root *root = BTRFS_I(inode)->root;
3238         struct btrfs_block_rsv *block_rsv = NULL;
3239         int reserve = 0;
3240         int insert = 0;
3241         int ret;
3242
3243         if (!root->orphan_block_rsv) {
3244                 block_rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
3245                 if (!block_rsv)
3246                         return -ENOMEM;
3247         }
3248
3249         spin_lock(&root->orphan_lock);
3250         if (!root->orphan_block_rsv) {
3251                 root->orphan_block_rsv = block_rsv;
3252         } else if (block_rsv) {
3253                 btrfs_free_block_rsv(root, block_rsv);
3254                 block_rsv = NULL;
3255         }
3256
3257         if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3258                               &BTRFS_I(inode)->runtime_flags)) {
3259 #if 0
3260                 /*
3261                  * For proper ENOSPC handling, we should do orphan
3262                  * cleanup when mounting. But this introduces backward
3263                  * compatibility issue.
3264                  */
3265                 if (!xchg(&root->orphan_item_inserted, 1))
3266                         insert = 2;
3267                 else
3268                         insert = 1;
3269 #endif
3270                 insert = 1;
3271                 atomic_inc(&root->orphan_inodes);
3272         }
3273
3274         if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3275                               &BTRFS_I(inode)->runtime_flags))
3276                 reserve = 1;
3277         spin_unlock(&root->orphan_lock);
3278
3279         /* grab metadata reservation from transaction handle */
3280         if (reserve) {
3281                 ret = btrfs_orphan_reserve_metadata(trans, inode);
3282                 ASSERT(!ret);
3283                 if (ret) {
3284                         atomic_dec(&root->orphan_inodes);
3285                         clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3286                                   &BTRFS_I(inode)->runtime_flags);
3287                         if (insert)
3288                                 clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3289                                           &BTRFS_I(inode)->runtime_flags);
3290                         return ret;
3291                 }
3292         }
3293
3294         /* insert an orphan item to track this unlinked/truncated file */
3295         if (insert >= 1) {
3296                 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
3297                 if (ret) {
3298                         atomic_dec(&root->orphan_inodes);
3299                         if (reserve) {
3300                                 clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3301                                           &BTRFS_I(inode)->runtime_flags);
3302                                 btrfs_orphan_release_metadata(inode);
3303                         }
3304                         if (ret != -EEXIST) {
3305                                 clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3306                                           &BTRFS_I(inode)->runtime_flags);
3307                                 btrfs_abort_transaction(trans, root, ret);
3308                                 return ret;
3309                         }
3310                 }
3311                 ret = 0;
3312         }
3313
3314         /* insert an orphan item to record that this subvolume has orphan files */
3315         if (insert >= 2) {
3316                 ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
3317                                                root->root_key.objectid);
3318                 if (ret && ret != -EEXIST) {
3319                         btrfs_abort_transaction(trans, root, ret);
3320                         return ret;
3321                 }
3322         }
3323         return 0;
3324 }
3325
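/*
 * Editor's sketch (not part of the original file): btrfs_orphan_add()
 * relies on test_and_set_bit() so that exactly one racing caller "wins"
 * each responsibility (inserting the orphan item, reserving metadata).
 * Minimal, hypothetical form of such a one-shot claim:
 */
#if 0
#define DEMO_CLAIMED	0

static bool demo_claim_once(unsigned long *flags)
{
	/* true only for the single caller that flips the bit 0 -> 1 */
	return !test_and_set_bit(DEMO_CLAIMED, flags);
}
#endif
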
3326 /*
3327  * We have done the truncate/delete so we can go ahead and remove the orphan
3328  * item for this particular inode.
3329  */
3330 static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3331                             struct inode *inode)
3332 {
3333         struct btrfs_root *root = BTRFS_I(inode)->root;
3334         int delete_item = 0;
3335         int release_rsv = 0;
3336         int ret = 0;
3337
3338         spin_lock(&root->orphan_lock);
3339         if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3340                                &BTRFS_I(inode)->runtime_flags))
3341                 delete_item = 1;
3342
3343         if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3344                                &BTRFS_I(inode)->runtime_flags))
3345                 release_rsv = 1;
3346         spin_unlock(&root->orphan_lock);
3347
3348         if (delete_item) {
3349                 atomic_dec(&root->orphan_inodes);
3350                 if (trans)
3351                         ret = btrfs_del_orphan_item(trans, root,
3352                                                     btrfs_ino(inode));
3353         }
3354
3355         if (release_rsv)
3356                 btrfs_orphan_release_metadata(inode);
3357
3358         return ret;
3359 }
3360
3361 /*
3362  * this cleans up any orphans that may be left on the list from the last use
3363  * of this root.
3364  */
3365 int btrfs_orphan_cleanup(struct btrfs_root *root)
3366 {
3367         struct btrfs_path *path;
3368         struct extent_buffer *leaf;
3369         struct btrfs_key key, found_key;
3370         struct btrfs_trans_handle *trans;
3371         struct inode *inode;
3372         u64 last_objectid = 0;
3373         int ret = 0, nr_unlink = 0, nr_truncate = 0;
3374
3375         if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
3376                 return 0;
3377
3378         path = btrfs_alloc_path();
3379         if (!path) {
3380                 ret = -ENOMEM;
3381                 goto out;
3382         }
3383         path->reada = READA_BACK;
3384
3385         key.objectid = BTRFS_ORPHAN_OBJECTID;
3386         key.type = BTRFS_ORPHAN_ITEM_KEY;
3387         key.offset = (u64)-1;
3388
3389         while (1) {
3390                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3391                 if (ret < 0)
3392                         goto out;
3393
3394                 /*
3395                  * ret == 0 means we found what we were searching for, which is
3396                  * weird but possible; only adjust the path if we didn't find
3397                  * the key, and see if we have entries that match
3398                  */
3399                 if (ret > 0) {
3400                         ret = 0;
3401                         if (path->slots[0] == 0)
3402                                 break;
3403                         path->slots[0]--;
3404                 }
3405
3406                 /* pull out the item */
3407                 leaf = path->nodes[0];
3408                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3409
3410                 /* make sure the item matches what we want */
3411                 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3412                         break;
3413                 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
3414                         break;
3415
3416                 /* release the path since we're done with it */
3417                 btrfs_release_path(path);
3418
3419                 /*
3420                  * this is basically btrfs_lookup, minus the crossing-root
3421                  * logic.  We store the inode number in the offset field of
3422                  * the orphan item.
3423                  */
3424
3425                 if (found_key.offset == last_objectid) {
3426                         btrfs_err(root->fs_info,
3427                                 "Error removing orphan entry, stopping orphan cleanup");
3428                         ret = -EINVAL;
3429                         goto out;
3430                 }
3431
3432                 last_objectid = found_key.offset;
3433
3434                 found_key.objectid = found_key.offset;
3435                 found_key.type = BTRFS_INODE_ITEM_KEY;
3436                 found_key.offset = 0;
3437                 inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
3438                 ret = PTR_ERR_OR_ZERO(inode);
3439                 if (ret && ret != -ESTALE)
3440                         goto out;
3441
3442                 if (ret == -ESTALE && root == root->fs_info->tree_root) {
3443                         struct btrfs_root *dead_root;
3444                         struct btrfs_fs_info *fs_info = root->fs_info;
3445                         int is_dead_root = 0;
3446
3447                         /*
3448                          * this is an orphan in the tree root. Currently these
3449                          * could come from 2 sources:
3450                          *  a) a snapshot deletion in progress
3451                          *  b) a free space cache inode
3452                          * We need to distinguish those two, as the snapshot
3453                          * orphan must not get deleted.
3454                          * find_dead_roots already ran before us, so if this
3455                          * is a snapshot deletion, we should find the root
3456                          * in the dead_roots list
3457                          */
3458                         spin_lock(&fs_info->trans_lock);
3459                         list_for_each_entry(dead_root, &fs_info->dead_roots,
3460                                             root_list) {
3461                                 if (dead_root->root_key.objectid ==
3462                                     found_key.objectid) {
3463                                         is_dead_root = 1;
3464                                         break;
3465                                 }
3466                         }
3467                         spin_unlock(&fs_info->trans_lock);
3468                         if (is_dead_root) {
3469                                 /* prevent this orphan from being found again */
3470                                 key.offset = found_key.objectid - 1;
3471                                 continue;
3472                         }
3473                 }
3474                 /*
3475                  * Inode is already gone but the orphan item is still there,
3476                  * kill the orphan item.
3477                  */
3478                 if (ret == -ESTALE) {
3479                         trans = btrfs_start_transaction(root, 1);
3480                         if (IS_ERR(trans)) {
3481                                 ret = PTR_ERR(trans);
3482                                 goto out;
3483                         }
3484                         btrfs_debug(root->fs_info, "auto deleting %Lu",
3485                                 found_key.objectid);
3486                         ret = btrfs_del_orphan_item(trans, root,
3487                                                     found_key.objectid);
3488                         btrfs_end_transaction(trans, root);
3489                         if (ret)
3490                                 goto out;
3491                         continue;
3492                 }
3493
3494                 /*
3495                  * add this inode to the orphan list so btrfs_orphan_del does
3496                  * the proper thing when we hit it
3497                  */
3498                 set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3499                         &BTRFS_I(inode)->runtime_flags);
3500                 atomic_inc(&root->orphan_inodes);
3501
3502                 /* if we have links, this was a truncate, let's do that */
3503                 if (inode->i_nlink) {
3504                         if (WARN_ON(!S_ISREG(inode->i_mode))) {
3505                                 iput(inode);
3506                                 continue;
3507                         }
3508                         nr_truncate++;
3509
3510                         /* 1 for the orphan item deletion. */
3511                         trans = btrfs_start_transaction(root, 1);
3512                         if (IS_ERR(trans)) {
3513                                 iput(inode);
3514                                 ret = PTR_ERR(trans);
3515                                 goto out;
3516                         }
3517                         ret = btrfs_orphan_add(trans, inode);
3518                         btrfs_end_transaction(trans, root);
3519                         if (ret) {
3520                                 iput(inode);
3521                                 goto out;
3522                         }
3523
3524                         ret = btrfs_truncate(inode);
3525                         if (ret)
3526                                 btrfs_orphan_del(NULL, inode);
3527                 } else {
3528                         nr_unlink++;
3529                 }
3530
3531                 /* this will do delete_inode and everything for us */
3532                 iput(inode);
3533                 if (ret)
3534                         goto out;
3535         }
3536         /* release the path since we're done with it */
3537         btrfs_release_path(path);
3538
3539         root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
3540
3541         if (root->orphan_block_rsv)
3542                 btrfs_block_rsv_release(root, root->orphan_block_rsv,
3543                                         (u64)-1);
3544
3545         if (root->orphan_block_rsv ||
3546             test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
3547                 trans = btrfs_join_transaction(root);
3548                 if (!IS_ERR(trans))
3549                         btrfs_end_transaction(trans, root);
3550         }
3551
3552         if (nr_unlink)
3553                 btrfs_debug(root->fs_info, "unlinked %d orphans", nr_unlink);
3554         if (nr_truncate)
3555                 btrfs_debug(root->fs_info, "truncated %d orphans", nr_truncate);
3556
3557 out:
3558         if (ret)
3559                 btrfs_err(root->fs_info,
3560                         "could not do orphan cleanup %d", ret);
3561         btrfs_free_path(path);
3562         return ret;
3563 }
3564
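/*
 * Editor's sketch (not part of the original file): btrfs_orphan_cleanup()
 * positions itself on the highest-keyed orphan item by searching for
 * offset (u64)-1; that exact key never exists, so the search returns > 0
 * and the previous slot holds the last real item.  Condensed,
 * hypothetical form of that positioning:
 */
#if 0
static int demo_find_last_orphan(struct btrfs_root *root,
				 struct btrfs_path *path)
{
	struct btrfs_key key;
	int ret;

	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;		/* sorts after every real item */

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret > 0) {
		if (path->slots[0] == 0)
			return 1;	/* no such items at all */
		path->slots[0]--;	/* step back to the last match */
		ret = 0;
	}
	return ret;
}
#endif
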
3565 /*
3566  * very simple check to peek ahead in the leaf looking for xattrs.  If we
3567  * don't find any xattrs, we know there can't be any acls.
3568  *
3569  * slot is the slot the inode is in, objectid is the objectid of the inode
3570  */
3571 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3572                                           int slot, u64 objectid,
3573                                           int *first_xattr_slot)
3574 {
3575         u32 nritems = btrfs_header_nritems(leaf);
3576         struct btrfs_key found_key;
3577         static u64 xattr_access = 0;
3578         static u64 xattr_default = 0;
3579         int scanned = 0;
3580
3581         if (!xattr_access) {
3582                 xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
3583                                         strlen(XATTR_NAME_POSIX_ACL_ACCESS));
3584                 xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
3585                                         strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
3586         }
3587
3588         slot++;
3589         *first_xattr_slot = -1;
3590         while (slot < nritems) {
3591                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3592
3593                 /* we found a different objectid, there must not be acls */
3594                 if (found_key.objectid != objectid)
3595                         return 0;
3596
3597                 /* we found an xattr, assume we've got an acl */
3598                 if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
3599                         if (*first_xattr_slot == -1)
3600                                 *first_xattr_slot = slot;
3601                         if (found_key.offset == xattr_access ||
3602                             found_key.offset == xattr_default)
3603                                 return 1;
3604                 }
3605
3606                 /*
3607                  * we found a key greater than an xattr key, there can't
3608                  * be any acls later on
3609                  */
3610                 if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3611                         return 0;
3612
3613                 slot++;
3614                 scanned++;
3615
3616                 /*
3617                  * the item order is inode, inode backrefs, xattrs, extents,
3618                  * so if there are a ton of hard links to an inode there can
3619                  * be a lot of backrefs.  Don't waste time searching too hard;
3620                  * this is just an optimization.
3621                  */
3622                 if (scanned >= 8)
3623                         break;
3624         }
3625         /* we hit the end of the leaf before we found an xattr or
3626          * something larger than an xattr.  We have to assume the inode
3627          * has acls
3628          */
3629         if (*first_xattr_slot == -1)
3630                 *first_xattr_slot = slot;
3631         return 1;
3632 }
3633
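/*
 * Editor's note (not part of the original file): xattr items are keyed by
 * the btrfs name hash of the attribute name, which is why the function
 * above can compare found_key.offset against the precomputed hashes of
 * the two POSIX ACL names.  Hypothetical predicate with the same keying:
 */
#if 0
static bool demo_is_access_acl_key(const struct btrfs_key *key)
{
	return key->type == BTRFS_XATTR_ITEM_KEY &&
	       key->offset == btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
					strlen(XATTR_NAME_POSIX_ACL_ACCESS));
}
#endif
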
3634 /*
3635  * read an inode from the btree into the in-memory inode
3636  */
3637 static void btrfs_read_locked_inode(struct inode *inode)
3638 {
3639         struct btrfs_path *path;
3640         struct extent_buffer *leaf;
3641         struct btrfs_inode_item *inode_item;
3642         struct btrfs_root *root = BTRFS_I(inode)->root;
3643         struct btrfs_key location;
3644         unsigned long ptr;
3645         int maybe_acls;
3646         u32 rdev;
3647         int ret;
3648         bool filled = false;
3649         int first_xattr_slot;
3650
3651         ret = btrfs_fill_inode(inode, &rdev);
3652         if (!ret)
3653                 filled = true;
3654
3655         path = btrfs_alloc_path();
3656         if (!path)
3657                 goto make_bad;
3658
3659         memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
3660
3661         ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
3662         if (ret)
3663                 goto make_bad;
3664
3665         leaf = path->nodes[0];
3666
3667         if (filled)
3668                 goto cache_index;
3669
3670         inode_item = btrfs_item_ptr(leaf, path->slots[0],
3671                                     struct btrfs_inode_item);
3672         inode->i_mode = btrfs_inode_mode(leaf, inode_item);
3673         set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
3674         i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
3675         i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
3676         btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
3677
3678         inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
3679         inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);
3680
3681         inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
3682         inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);
3683
3684         inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime);
3685         inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);
3686
3687         BTRFS_I(inode)->i_otime.tv_sec =
3688                 btrfs_timespec_sec(leaf, &inode_item->otime);
3689         BTRFS_I(inode)->i_otime.tv_nsec =
3690                 btrfs_timespec_nsec(leaf, &inode_item->otime);
3691
3692         inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
3693         BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
3694         BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3695
3696         inode->i_version = btrfs_inode_sequence(leaf, inode_item);
3697         inode->i_generation = BTRFS_I(inode)->generation;
3698         inode->i_rdev = 0;
3699         rdev = btrfs_inode_rdev(leaf, inode_item);
3700
3701         BTRFS_I(inode)->index_cnt = (u64)-1;
3702         BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
3703
3704 cache_index:
3705         /*
3706          * If we were modified in the current generation and evicted from memory
3707          * and then re-read we need to do a full sync since we don't have any
3708          * idea about which extents were modified before we were evicted from
3709          * cache.
3710          *
3711          * This is required for both inode re-read from disk and delayed inode
3712          * in delayed_nodes_tree.
3713          */
3714         if (BTRFS_I(inode)->last_trans == root->fs_info->generation)
3715                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3716                         &BTRFS_I(inode)->runtime_flags);
3717
3718         /*
3719          * We don't persist the id of the transaction where an unlink operation
3720          * against the inode was last made. So here we assume the inode might
3721          * have been evicted, and therefore the exact value of last_unlink_trans
3722          * lost, and set it to last_trans to avoid metadata inconsistencies
3723          * between the inode and its parent if the inode is fsync'ed and the log
3724          * replayed. For example, in the scenario:
3725          *
3726          * touch mydir/foo
3727          * ln mydir/foo mydir/bar
3728          * sync
3729          * unlink mydir/bar
3730          * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
3731          * xfs_io -c fsync mydir/foo
3732          * <power failure>
3733          * mount fs, triggers fsync log replay
3734          *
3735          * We must make sure that when we fsync our inode foo we also log its
3736          * parent inode, otherwise after log replay the parent still has the
3737          * dentry with the "bar" name but our inode foo has a link count of 1
3738          * and doesn't have an inode ref with the name "bar" anymore.
3739          *
3740          * Setting last_unlink_trans to last_trans is a pessimistic approach,
3741          * but it guarantees correctness at the expense of occasional full
3742          * transaction commits on fsync if our inode is a directory, or if our
3743          * inode is not a directory, logging its parent unnecessarily.
3744          */
3745         BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
3746
3747         path->slots[0]++;
3748         if (inode->i_nlink != 1 ||
3749             path->slots[0] >= btrfs_header_nritems(leaf))
3750                 goto cache_acl;
3751
3752         btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
3753         if (location.objectid != btrfs_ino(inode))
3754                 goto cache_acl;
3755
3756         ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3757         if (location.type == BTRFS_INODE_REF_KEY) {
3758                 struct btrfs_inode_ref *ref;
3759
3760                 ref = (struct btrfs_inode_ref *)ptr;
3761                 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
3762         } else if (location.type == BTRFS_INODE_EXTREF_KEY) {
3763                 struct btrfs_inode_extref *extref;
3764
3765                 extref = (struct btrfs_inode_extref *)ptr;
3766                 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
3767                                                                      extref);
3768         }
3769 cache_acl:
3770         /*
3771          * try to precache a NULL acl entry for files that don't have
3772          * any xattrs or acls
3773          */
3774         maybe_acls = acls_after_inode_item(leaf, path->slots[0],
3775                                            btrfs_ino(inode), &first_xattr_slot);
3776         if (first_xattr_slot != -1) {
3777                 path->slots[0] = first_xattr_slot;
3778                 ret = btrfs_load_inode_props(inode, path);
3779                 if (ret)
3780                         btrfs_err(root->fs_info,
3781                                   "error loading props for ino %llu (root %llu): %d",
3782                                   btrfs_ino(inode),
3783                                   root->root_key.objectid, ret);
3784         }
3785         btrfs_free_path(path);
3786
3787         if (!maybe_acls)
3788                 cache_no_acl(inode);
3789
3790         switch (inode->i_mode & S_IFMT) {
3791         case S_IFREG:
3792                 inode->i_mapping->a_ops = &btrfs_aops;
3793                 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3794                 inode->i_fop = &btrfs_file_operations;
3795                 inode->i_op = &btrfs_file_inode_operations;
3796                 break;
3797         case S_IFDIR:
3798                 inode->i_fop = &btrfs_dir_file_operations;
3799                 if (root == root->fs_info->tree_root)
3800                         inode->i_op = &btrfs_dir_ro_inode_operations;
3801                 else
3802                         inode->i_op = &btrfs_dir_inode_operations;
3803                 break;
3804         case S_IFLNK:
3805                 inode->i_op = &btrfs_symlink_inode_operations;
3806                 inode_nohighmem(inode);
3807                 inode->i_mapping->a_ops = &btrfs_symlink_aops;
3808                 break;
3809         default:
3810                 inode->i_op = &btrfs_special_inode_operations;
3811                 init_special_inode(inode, inode->i_mode, rdev);
3812                 break;
3813         }
3814
3815         btrfs_update_iflags(inode);
3816         return;
3817
3818 make_bad:
3819         btrfs_free_path(path);
3820         make_bad_inode(inode);
3821 }
3822
3823 /*
3824  * given a leaf and an inode, copy the inode fields into the leaf
3825  */
3826 static void fill_inode_item(struct btrfs_trans_handle *trans,
3827                             struct extent_buffer *leaf,
3828                             struct btrfs_inode_item *item,
3829                             struct inode *inode)
3830 {
3831         struct btrfs_map_token token;
3832
3833         btrfs_init_map_token(&token);
3834
3835         btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3836         btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3837         btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size,
3838                                    &token);
3839         btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3840         btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3841
3842         btrfs_set_token_timespec_sec(leaf, &item->atime,
3843                                      inode->i_atime.tv_sec, &token);
3844         btrfs_set_token_timespec_nsec(leaf, &item->atime,
3845                                       inode->i_atime.tv_nsec, &token);
3846
3847         btrfs_set_token_timespec_sec(leaf, &item->mtime,
3848                                      inode->i_mtime.tv_sec, &token);
3849         btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3850                                       inode->i_mtime.tv_nsec, &token);
3851
3852         btrfs_set_token_timespec_sec(leaf, &item->ctime,
3853                                      inode->i_ctime.tv_sec, &token);
3854         btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3855                                       inode->i_ctime.tv_nsec, &token);
3856
3857         btrfs_set_token_timespec_sec(leaf, &item->otime,
3858                                      BTRFS_I(inode)->i_otime.tv_sec, &token);
3859         btrfs_set_token_timespec_nsec(leaf, &item->otime,
3860                                       BTRFS_I(inode)->i_otime.tv_nsec, &token);
3861
3862         btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3863                                      &token);
3864         btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
3865                                          &token);
3866         btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
3867         btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3868         btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3869         btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3870         btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3871 }
3872
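/*
 * Editor's sketch (not part of the original file): fill_inode_item()
 * funnels every setter through a single btrfs_map_token so the extent
 * buffer page is mapped once and reused, instead of being remapped for
 * each field.  Hypothetical reduced version showing the pattern:
 */
#if 0
static void demo_fill(struct extent_buffer *leaf,
		      struct btrfs_inode_item *item, struct inode *inode)
{
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);	/* cache the mapping across setters */
	btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
	btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
}
#endif
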
3873 /*
3874  * copy everything in the in-memory inode into the btree.
3875  */
3876 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
3877                                 struct btrfs_root *root, struct inode *inode)
3878 {
3879         struct btrfs_inode_item *inode_item;
3880         struct btrfs_path *path;
3881         struct extent_buffer *leaf;
3882         int ret;
3883
3884         path = btrfs_alloc_path();
3885         if (!path)
3886                 return -ENOMEM;
3887
3888         path->leave_spinning = 1;
3889         ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
3890                                  1);
3891         if (ret) {
3892                 if (ret > 0)
3893                         ret = -ENOENT;
3894                 goto failed;
3895         }
3896
3897         leaf = path->nodes[0];
3898         inode_item = btrfs_item_ptr(leaf, path->slots[0],
3899                                     struct btrfs_inode_item);
3900
3901         fill_inode_item(trans, leaf, inode_item, inode);
3902         btrfs_mark_buffer_dirty(leaf);
3903         btrfs_set_inode_last_trans(trans, inode);
3904         ret = 0;
3905 failed:
3906         btrfs_free_path(path);
3907         return ret;
3908 }
3909
3910 /*
3911  * copy everything in the in-memory inode into the btree.
3912  */
3913 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
3914                                 struct btrfs_root *root, struct inode *inode)
3915 {
3916         int ret;
3917
3918         /*
3919          * If the inode is a free space inode, we can deadlock during commit
3920          * if we put it into the delayed code.
3921          *
3922          * The data relocation inode should also be directly updated
3923          * without delay.
3924          */
3925         if (!btrfs_is_free_space_inode(inode)
3926             && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
3927             && !root->fs_info->log_root_recovering) {
3928                 btrfs_update_root_times(trans, root);
3929
3930                 ret = btrfs_delayed_update_inode(trans, root, inode);
3931                 if (!ret)
3932                         btrfs_set_inode_last_trans(trans, inode);
3933                 return ret;
3934         }
3935
3936         return btrfs_update_inode_item(trans, root, inode);
3937 }
3938
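/*
 * Same as btrfs_update_inode(), but if the delayed path fails with
 * -ENOSPC, fall back to updating the inode item in the tree directly.
 */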
3939 noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
3940                                          struct btrfs_root *root,
3941                                          struct inode *inode)
3942 {
3943         int ret;
3944
3945         ret = btrfs_update_inode(trans, root, inode);
3946         if (ret == -ENOSPC)
3947                 return btrfs_update_inode_item(trans, root, inode);
3948         return ret;
3949 }
3950
3951 /*
3952  * unlink helper that gets used here in inode.c and in the tree logging
3953  * recovery code.  It removes a link in a directory with a given name, and
3954  * also drops the back refs from the inode to the directory.
3955  */
3956 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3957                                 struct btrfs_root *root,
3958                                 struct inode *dir, struct inode *inode,
3959                                 const char *name, int name_len)
3960 {
3961         struct btrfs_path *path;
3962         int ret = 0;
3963         struct extent_buffer *leaf;
3964         struct btrfs_dir_item *di;
3965         struct btrfs_key key;
3966         u64 index;
3967         u64 ino = btrfs_ino(inode);
3968         u64 dir_ino = btrfs_ino(dir);
3969
3970         path = btrfs_alloc_path();
3971         if (!path) {
3972                 ret = -ENOMEM;
3973                 goto out;
3974         }
3975
3976         path->leave_spinning = 1;
3977         di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3978                                     name, name_len, -1);
3979         if (IS_ERR(di)) {
3980                 ret = PTR_ERR(di);
3981                 goto err;
3982         }
3983         if (!di) {
3984                 ret = -ENOENT;
3985                 goto err;
3986         }
3987         leaf = path->nodes[0];
3988         btrfs_dir_item_key_to_cpu(leaf, di, &key);
3989         ret = btrfs_delete_one_dir_name(trans, root, path, di);
3990         if (ret)
3991                 goto err;
3992         btrfs_release_path(path);
3993
3994         /*
3995          * If we don't have a cached dir index, we have to look up the
3996          * inode ref to find it; since we then hold the inode ref, we
3997          * remove it directly and delayed deletion is unnecessary.
3998          *
3999          * But if we do have the dir index, there is no need to search
4000          * for the inode ref.  Since the inode ref lives close to the
4001          * inode item, it is better to delay its deletion and do it
4002          * when we update the inode item.
4003          */
4004         if (BTRFS_I(inode)->dir_index) {
4005                 ret = btrfs_delayed_delete_inode_ref(inode);
4006                 if (!ret) {
4007                         index = BTRFS_I(inode)->dir_index;
4008                         goto skip_backref;
4009                 }
4010         }
4011
4012         ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
4013                                   dir_ino, &index);
4014         if (ret) {
4015                 btrfs_info(root->fs_info,
4016                         "failed to delete reference to %.*s, inode %llu parent %llu",
4017                         name_len, name, ino, dir_ino);
4018                 btrfs_abort_transaction(trans, root, ret);
4019                 goto err;
4020         }
4021 skip_backref:
4022         ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
4023         if (ret) {
4024                 btrfs_abort_transaction(trans, root, ret);
4025                 goto err;
4026         }
4027
4028         ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
4029                                          inode, dir_ino);
4030         if (ret != 0 && ret != -ENOENT) {
4031                 btrfs_abort_transaction(trans, root, ret);
4032                 goto err;
4033         }
4034
4035         ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
4036                                            dir, index);
4037         if (ret == -ENOENT)
4038                 ret = 0;
4039         else if (ret)
4040                 btrfs_abort_transaction(trans, root, ret);
4041 err:
4042         btrfs_free_path(path);
4043         if (ret)
4044                 goto out;
4045
4046         btrfs_i_size_write(dir, dir->i_size - name_len * 2);
4047         inode_inc_iversion(inode);
4048         inode_inc_iversion(dir);
4049         inode->i_ctime = dir->i_mtime =
4050                 dir->i_ctime = current_fs_time(inode->i_sb);
4051         ret = btrfs_update_inode(trans, root, dir);
4052 out:
4053         return ret;
4054 }
4055
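/*
 * Remove the directory entry and back references for @name via
 * __btrfs_unlink_inode(), then drop one link on @inode and write the
 * updated inode item back to the tree.
 */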
4056 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4057                        struct btrfs_root *root,
4058                        struct inode *dir, struct inode *inode,
4059                        const char *name, int name_len)
4060 {
4061         int ret;
4062         ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
4063         if (!ret) {
4064                 drop_nlink(inode);
4065                 ret = btrfs_update_inode(trans, root, inode);
4066         }
4067         return ret;
4068 }
4069
4070 /*
4071  * helper to start transaction for unlink and rmdir.
4072  *
4073  * unlink and rmdir are special in btrfs, they do not always free space, so
4074  * if we cannot make our reservations the normal way try and see if there is
4075  * plenty of slack room in the global reserve to migrate, otherwise we cannot
4076  * allow the unlink to occur.
4077  */
4078 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
4079 {
4080         struct btrfs_root *root = BTRFS_I(dir)->root;
4081
4082         /*
4083          * 1 for the possible orphan item
4084          * 1 for the dir item
4085          * 1 for the dir index
4086          * 1 for the inode ref
4087          * 1 for the inode
4088          */
4089         return btrfs_start_transaction_fallback_global_rsv(root, 5, 5);
4090 }
4091
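/*
 * .unlink inode operation: remove @dentry's name from @dir and, if the
 * link count drops to zero, add the inode to the orphan list so its
 * items can be reclaimed once the last reference goes away.
 */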
4092 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
4093 {
4094         struct btrfs_root *root = BTRFS_I(dir)->root;
4095         struct btrfs_trans_handle *trans;
4096         struct inode *inode = d_inode(dentry);
4097         int ret;
4098
4099         trans = __unlink_start_trans(dir);
4100         if (IS_ERR(trans))
4101                 return PTR_ERR(trans);
4102
4103         btrfs_record_unlink_dir(trans, dir, d_inode(dentry), 0);
4104
4105         ret = btrfs_unlink_inode(trans, root, dir, d_inode(dentry),
4106                                  dentry->d_name.name, dentry->d_name.len);
4107         if (ret)
4108                 goto out;
4109
4110         if (inode->i_nlink == 0) {
4111                 ret = btrfs_orphan_add(trans, inode);
4112                 if (ret)
4113                         goto out;
4114         }
4115
4116 out:
4117         btrfs_end_transaction(trans, root);
4118         btrfs_btree_balance_dirty(root);
4119         return ret;
4120 }
4121
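/*
 * Remove a subvolume or snapshot entry from @dir: delete the dir item
 * pointing at the root, the root ref in the tree of tree roots and the
 * delayed dir index, then update the parent directory's size and times.
 */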
4122 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
4123                         struct btrfs_root *root,
4124                         struct inode *dir, u64 objectid,
4125                         const char *name, int name_len)
4126 {
4127         struct btrfs_path *path;
4128         struct extent_buffer *leaf;
4129         struct btrfs_dir_item *di;
4130         struct btrfs_key key;
4131         u64 index;
4132         int ret;
4133         u64 dir_ino = btrfs_ino(dir);
4134
4135         path = btrfs_alloc_path();
4136         if (!path)
4137                 return -ENOMEM;
4138
4139         di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4140                                    name, name_len, -1);
4141         if (IS_ERR_OR_NULL(di)) {
4142                 if (!di)
4143                         ret = -ENOENT;
4144                 else
4145                         ret = PTR_ERR(di);
4146                 goto out;
4147         }
4148
4149         leaf = path->nodes[0];
4150         btrfs_dir_item_key_to_cpu(leaf, di, &key);
4151         WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
4152         ret = btrfs_delete_one_dir_name(trans, root, path, di);
4153         if (ret) {
4154                 btrfs_abort_transaction(trans, root, ret);
4155                 goto out;
4156         }
4157         btrfs_release_path(path);
4158
4159         ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
4160                                  objectid, root->root_key.objectid,
4161                                  dir_ino, &index, name, name_len);
4162         if (ret < 0) {
4163                 if (ret != -ENOENT) {
4164                         btrfs_abort_transaction(trans, root, ret);
4165                         goto out;
4166                 }
4167                 di = btrfs_search_dir_index_item(root, path, dir_ino,
4168                                                  name, name_len);
4169                 if (IS_ERR_OR_NULL(di)) {
4170                         if (!di)
4171                                 ret = -ENOENT;
4172                         else
4173                                 ret = PTR_ERR(di);
4174                         btrfs_abort_transaction(trans, root, ret);
4175                         goto out;
4176                 }
4177
4178                 leaf = path->nodes[0];
4179                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4180                 btrfs_release_path(path);
4181                 index = key.offset;
4182         }
4183         btrfs_release_path(path);
4184
4185         ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
4186         if (ret) {
4187                 btrfs_abort_transaction(trans, root, ret);
4188                 goto out;
4189         }
4190
4191         btrfs_i_size_write(dir, dir->i_size - name_len * 2);
4192         inode_inc_iversion(dir);
4193         dir->i_mtime = dir->i_ctime = current_fs_time(dir->i_sb);
4194         ret = btrfs_update_inode_fallback(trans, root, dir);
4195         if (ret)
4196                 btrfs_abort_transaction(trans, root, ret);
4197 out:
4198         btrfs_free_path(path);
4199         return ret;
4200 }
4201
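/*
 * .rmdir inode operation.  Only empty directories (i_size no larger
 * than BTRFS_EMPTY_DIR_SIZE) can be removed, and subvolume roots are
 * rejected with -EPERM; empty snapshot placeholder directories are
 * handled via btrfs_unlink_subvol() instead.
 */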
4202 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
4203 {
4204         struct inode *inode = d_inode(dentry);
4205         int err = 0;
4206         struct btrfs_root *root = BTRFS_I(dir)->root;
4207         struct btrfs_trans_handle *trans;
4208
4209         if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
4210                 return -ENOTEMPTY;
4211         if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
4212                 return -EPERM;
4213
4214         trans = __unlink_start_trans(dir);
4215         if (IS_ERR(trans))
4216                 return PTR_ERR(trans);
4217
4218         if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
4219                 err = btrfs_unlink_subvol(trans, root, dir,
4220                                           BTRFS_I(inode)->location.objectid,
4221                                           dentry->d_name.name,
4222                                           dentry->d_name.len);
4223                 goto out;
4224         }
4225
4226         err = btrfs_orphan_add(trans, inode);
4227         if (err)
4228                 goto out;
4229
4230         /* now the directory is empty */
4231         err = btrfs_unlink_inode(trans, root, dir, d_inode(dentry),
4232                                  dentry->d_name.name, dentry->d_name.len);
4233         if (!err)
4234                 btrfs_i_size_write(inode, 0);
4235 out:
4236         btrfs_end_transaction(trans, root);
4237         btrfs_btree_balance_dirty(root);
4238
4239         return err;
4240 }
4241
4242 static int truncate_space_check(struct btrfs_trans_handle *trans,
4243                                 struct btrfs_root *root,
4244                                 u64 bytes_deleted)
4245 {
4246         int ret;
4247
4248         /*
4249          * This is only used to apply pressure to the enospc system; we don't
4250          * intend to use this reservation at all.
4251          */
4252         bytes_deleted = btrfs_csum_bytes_to_leaves(root, bytes_deleted);
4253         bytes_deleted *= root->nodesize;
4254         ret = btrfs_block_rsv_add(root, &root->fs_info->trans_block_rsv,
4255                                   bytes_deleted, BTRFS_RESERVE_NO_FLUSH);
4256         if (!ret) {
4257                 trace_btrfs_space_reservation(root->fs_info, "transaction",
4258                                               trans->transid,
4259                                               bytes_deleted, 1);
4260                 trans->bytes_reserved += bytes_deleted;
4261         }
4262         return ret;
4263
4264 }
4265
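/*
 * Shrink an inline extent so that it ends at @new_size.  Uncompressed
 * inline data is truncated in place by shrinking the item itself, while
 * compressed inline data is handled by zeroing the tail of its last
 * page instead, avoiding a decompress/recompress cycle.
 */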
4266 static int truncate_inline_extent(struct inode *inode,
4267                                   struct btrfs_path *path,
4268                                   struct btrfs_key *found_key,
4269                                   const u64 item_end,
4270                                   const u64 new_size)
4271 {
4272         struct extent_buffer *leaf = path->nodes[0];
4273         int slot = path->slots[0];
4274         struct btrfs_file_extent_item *fi;
4275         u32 size = (u32)(new_size - found_key->offset);
4276         struct btrfs_root *root = BTRFS_I(inode)->root;
4277
4278         fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
4279
4280         if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) {
4281                 loff_t offset = new_size;
4282                 loff_t page_end = ALIGN(offset, PAGE_SIZE);
4283
4284                 /*
4285                  * Zero out the remainder of the last page of our inline extent,
4286                  * instead of directly truncating our inline extent here - that
4287                  * would be much more complex (decompressing all the data, then
4288                  * compressing the truncated data, which might be bigger than
4289                  * the size of the inline extent, resizing the extent, etc.).
4290                  * We release the path because to get the page we might need to
4291                  * read the extent item from disk (data not in the page cache).
4292                  */
4293                 btrfs_release_path(path);
4294                 return btrfs_truncate_block(inode, offset, page_end - offset,
4295                                         0);
4296         }
4297
4298         btrfs_set_file_extent_ram_bytes(leaf, fi, size);
4299         size = btrfs_file_extent_calc_inline_size(size);
4300         btrfs_truncate_item(root, path, size, 1);
4301
4302         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4303                 inode_sub_bytes(inode, item_end + 1 - new_size);
4304
4305         return 0;
4306 }
4307
4308 /*
4309  * this can truncate away extent items, csum items and directory items.
4310  * It starts at a high offset and removes keys until it can't find
4311  * any higher than new_size
4312  *
4313  * csum items that cross the new i_size are truncated to the new size
4314  * as well.
4315  *
4316  * min_type is the minimum key type to truncate down to.  If set to 0, this
4317  * will kill all the items on this inode, including the INODE_ITEM_KEY.
4318  */
4319 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
4320                                struct btrfs_root *root,
4321                                struct inode *inode,
4322                                u64 new_size, u32 min_type)
4323 {
4324         struct btrfs_path *path;
4325         struct extent_buffer *leaf;
4326         struct btrfs_file_extent_item *fi;
4327         struct btrfs_key key;
4328         struct btrfs_key found_key;
4329         u64 extent_start = 0;
4330         u64 extent_num_bytes = 0;
4331         u64 extent_offset = 0;
4332         u64 item_end = 0;
4333         u64 last_size = new_size;
4334         u32 found_type = (u8)-1;
4335         int found_extent;
4336         int del_item;
4337         int pending_del_nr = 0;
4338         int pending_del_slot = 0;
4339         int extent_type = -1;
4340         int ret;
4341         int err = 0;
4342         u64 ino = btrfs_ino(inode);
4343         u64 bytes_deleted = 0;
4344         bool be_nice = false;
4345         bool should_throttle = false;
4346         bool should_end = false;
4347
4348         BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
4349
4350         /*
4351          * for non-free space inodes and roots with BTRFS_ROOT_REF_COWS set,
4352          * we want to back off from time to time.
4353          */
4354         if (!btrfs_is_free_space_inode(inode) &&
4355             test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4356                 be_nice = true;
4357
4358         path = btrfs_alloc_path();
4359         if (!path)
4360                 return -ENOMEM;
4361         path->reada = READA_BACK;
4362
4363         /*
4364          * We want to drop from the next block forward in case this new size is
4365          * not block aligned since we will be keeping the last block of the
4366          * extent just the way it is.
4367          */
4368         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4369             root == root->fs_info->tree_root)
4370                 btrfs_drop_extent_cache(inode, ALIGN(new_size,
4371                                         root->sectorsize), (u64)-1, 0);
4372
4373         /*
4374          * This function is also used to drop the items in the log tree before
4375          * we relog the inode, so if root != BTRFS_I(inode)->root, it means
4376          * it is used to drop the logged items.  So we shouldn't kill the delayed
4377          * items.
4378          */
4379         if (min_type == 0 && root == BTRFS_I(inode)->root)
4380                 btrfs_kill_delayed_inode_items(inode);
4381
4382         key.objectid = ino;
4383         key.offset = (u64)-1;
4384         key.type = (u8)-1;
4385
4386 search_again:
4387         /*
4388          * with a 16K leaf size and 128MB extents, you can actually queue
4389          * up a huge file in a single leaf.  Most of the time when
4390          * bytes_deleted is > 0, it will be huge by the time we get here.
4391          */
4392         if (be_nice && bytes_deleted > SZ_32M) {
4393                 if (btrfs_should_end_transaction(trans, root)) {
4394                         err = -EAGAIN;
4395                         goto error;
4396                 }
4397         }
4398
4399
4400         path->leave_spinning = 1;
4401         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
4402         if (ret < 0) {
4403                 err = ret;
4404                 goto out;
4405         }
4406
4407         if (ret > 0) {
4408                 /*
4409                  * No items in the tree for us to truncate, we're done.
4410                  */
4411                 if (path->slots[0] == 0)
4412                         goto out;
4413                 path->slots[0]--;
4414         }
4415
4416         while (1) {
4417                 fi = NULL;
4418                 leaf = path->nodes[0];
4419                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4420                 found_type = found_key.type;
4421
4422                 if (found_key.objectid != ino)
4423                         break;
4424
4425                 if (found_type < min_type)
4426                         break;
4427
4428                 item_end = found_key.offset;
4429                 if (found_type == BTRFS_EXTENT_DATA_KEY) {
4430                         fi = btrfs_item_ptr(leaf, path->slots[0],
4431                                             struct btrfs_file_extent_item);
4432                         extent_type = btrfs_file_extent_type(leaf, fi);
4433                         if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4434                                 item_end +=
4435                                     btrfs_file_extent_num_bytes(leaf, fi);
4436                         } else {
4437                                 item_end += btrfs_file_extent_inline_len(leaf,
4438                                                          path->slots[0], fi);
4439                         }
4440                         item_end--;
4441                 }
4442                 if (found_type > min_type) {
4443                         del_item = 1;
4444                 } else {
4445                         if (item_end < new_size)
4446                                 break;
4447                         if (found_key.offset >= new_size)
4448                                 del_item = 1;
4449                         else
4450                                 del_item = 0;
4451                 }
4452                 found_extent = 0;
4453                 /* FIXME, shrink the extent if the ref count is only 1 */
4454                 if (found_type != BTRFS_EXTENT_DATA_KEY)
4455                         goto delete;
4456
4457                 if (del_item)
4458                         last_size = found_key.offset;
4459                 else
4460                         last_size = new_size;
4461
4462                 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4463                         u64 num_dec;
4464                         extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
4465                         if (!del_item) {
4466                                 u64 orig_num_bytes =
4467                                         btrfs_file_extent_num_bytes(leaf, fi);
4468                                 extent_num_bytes = ALIGN(new_size -
4469                                                 found_key.offset,
4470                                                 root->sectorsize);
4471                                 btrfs_set_file_extent_num_bytes(leaf, fi,
4472                                                          extent_num_bytes);
4473                                 num_dec = (orig_num_bytes -
4474                                            extent_num_bytes);
4475                                 if (test_bit(BTRFS_ROOT_REF_COWS,
4476                                              &root->state) &&
4477                                     extent_start != 0)
4478                                         inode_sub_bytes(inode, num_dec);
4479                                 btrfs_mark_buffer_dirty(leaf);
4480                         } else {
4481                                 extent_num_bytes =
4482                                         btrfs_file_extent_disk_num_bytes(leaf,
4483                                                                          fi);
4484                                 extent_offset = found_key.offset -
4485                                         btrfs_file_extent_offset(leaf, fi);
4486
4487                                 /* FIXME blocksize != 4096 */
4488                                 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
4489                                 if (extent_start != 0) {
4490                                         found_extent = 1;
4491                                         if (test_bit(BTRFS_ROOT_REF_COWS,
4492                                                      &root->state))
4493                                                 inode_sub_bytes(inode, num_dec);
4494                                 }
4495                         }
4496                 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
4497                         /*
4498                          * we can't truncate inline items that have had
4499                          * special encodings
4500                          */
4501                         if (!del_item &&
4502                             btrfs_file_extent_encryption(leaf, fi) == 0 &&
4503                             btrfs_file_extent_other_encoding(leaf, fi) == 0) {
4504
4505                                 /*
4506                                  * Need to release path in order to truncate a
4507                                  * compressed extent. So delete any accumulated
4508                                  * extent items so far.
4509                                  */
4510                                 if (btrfs_file_extent_compression(leaf, fi) !=
4511                                     BTRFS_COMPRESS_NONE && pending_del_nr) {
4512                                         err = btrfs_del_items(trans, root, path,
4513                                                               pending_del_slot,
4514                                                               pending_del_nr);
4515                                         if (err) {
4516                                                 btrfs_abort_transaction(trans,
4517                                                                         root,
4518                                                                         err);
4519                                                 goto error;
4520                                         }
4521                                         pending_del_nr = 0;
4522                                 }
4523
4524                                 err = truncate_inline_extent(inode, path,
4525                                                              &found_key,
4526                                                              item_end,
4527                                                              new_size);
4528                                 if (err) {
4529                                         btrfs_abort_transaction(trans,
4530                                                                 root, err);
4531                                         goto error;
4532                                 }
4533                         } else if (test_bit(BTRFS_ROOT_REF_COWS,
4534                                             &root->state)) {
4535                                 inode_sub_bytes(inode, item_end + 1 - new_size);
4536                         }
4537                 }
4538 delete:
4539                 if (del_item) {
4540                         if (!pending_del_nr) {
4541                                 /* no pending yet, add ourselves */
4542                                 pending_del_slot = path->slots[0];
4543                                 pending_del_nr = 1;
4544                         } else if (pending_del_nr &&
4545                                    path->slots[0] + 1 == pending_del_slot) {
4546                                 /* hop on the pending chunk */
4547                                 pending_del_nr++;
4548                                 pending_del_slot = path->slots[0];
4549                         } else {
4550                                 BUG();
4551                         }
4552                 } else {
4553                         break;
4554                 }
4555                 should_throttle = false;
4556
4557                 if (found_extent &&
4558                     (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4559                      root == root->fs_info->tree_root)) {
4560                         btrfs_set_path_blocking(path);
4561                         bytes_deleted += extent_num_bytes;
4562                         ret = btrfs_free_extent(trans, root, extent_start,
4563                                                 extent_num_bytes, 0,
4564                                                 btrfs_header_owner(leaf),
4565                                                 ino, extent_offset);
4566                         BUG_ON(ret);
4567                         if (btrfs_should_throttle_delayed_refs(trans, root))
4568                                 btrfs_async_run_delayed_refs(root,
4569                                                              trans->transid,
4570                                         trans->delayed_ref_updates * 2, 0);
4571                         if (be_nice) {
4572                                 if (truncate_space_check(trans, root,
4573                                                          extent_num_bytes)) {
4574                                         should_end = true;
4575                                 }
4576                                 if (btrfs_should_throttle_delayed_refs(trans,
4577                                                                        root)) {
4578                                         should_throttle = true;
4579                                 }
4580                         }
4581                 }
4582
4583                 if (found_type == BTRFS_INODE_ITEM_KEY)
4584                         break;
4585
4586                 if (path->slots[0] == 0 ||
4587                     path->slots[0] != pending_del_slot ||
4588                     should_throttle || should_end) {
4589                         if (pending_del_nr) {
4590                                 ret = btrfs_del_items(trans, root, path,
4591                                                 pending_del_slot,
4592                                                 pending_del_nr);
4593                                 if (ret) {
4594                                         btrfs_abort_transaction(trans,
4595                                                                 root, ret);
4596                                         goto error;
4597                                 }
4598                                 pending_del_nr = 0;
4599                         }
4600                         btrfs_release_path(path);
4601                         if (should_throttle) {
4602                                 unsigned long updates = trans->delayed_ref_updates;
4603                                 if (updates) {
4604                                         trans->delayed_ref_updates = 0;
4605                                         ret = btrfs_run_delayed_refs(trans, root, updates * 2);
4606                                         if (ret && !err)
4607                                                 err = ret;
4608                                 }
4609                         }
4610                         /*
4611                          * if we failed to refill our space rsv, bail out
4612                          * and let the transaction restart
4613                          */
4614                         if (should_end) {
4615                                 err = -EAGAIN;
4616                                 goto error;
4617                         }
4618                         goto search_again;
4619                 } else {
4620                         path->slots[0]--;
4621                 }
4622         }
4623 out:
4624         if (pending_del_nr) {
4625                 ret = btrfs_del_items(trans, root, path, pending_del_slot,
4626                                       pending_del_nr);
4627                 if (ret)
4628                         btrfs_abort_transaction(trans, root, ret);
4629         }
4630 error:
4631         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
4632                 btrfs_ordered_update_i_size(inode, last_size, NULL);
4633
4634         btrfs_free_path(path);
4635
4636         if (be_nice && bytes_deleted > SZ_32M) {
4637                 unsigned long updates = trans->delayed_ref_updates;
4638                 if (updates) {
4639                         trans->delayed_ref_updates = 0;
4640                         ret = btrfs_run_delayed_refs(trans, root, updates * 2);
4641                         if (ret && !err)
4642                                 err = ret;
4643                 }
4644         }
4645         return err;
4646 }
4647
4648 /*
4649  * btrfs_truncate_block - read, zero a chunk and write a block
4650  * @inode - inode that we're zeroing
4651  * @from - the offset to start zeroing
4652  * @len - the length to zero, 0 to zero the entire range relative to the
4653  *      offset
4654  * @front - zero up to the offset instead of from the offset on
4655  *
4656  * This will find the block for the "from" offset, cow the block and zero the
4657  * part we want to zero.  This is used with truncate and hole punching.
4658  */
4659 int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
4660                         int front)
4661 {
4662         struct address_space *mapping = inode->i_mapping;
4663         struct btrfs_root *root = BTRFS_I(inode)->root;
4664         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4665         struct btrfs_ordered_extent *ordered;
4666         struct extent_state *cached_state = NULL;
4667         char *kaddr;
4668         u32 blocksize = root->sectorsize;
4669         pgoff_t index = from >> PAGE_SHIFT;
4670         unsigned offset = from & (blocksize - 1);
4671         struct page *page;
4672         gfp_t mask = btrfs_alloc_write_mask(mapping);
4673         int ret = 0;
4674         u64 block_start;
4675         u64 block_end;
4676
4677         if ((offset & (blocksize - 1)) == 0 &&
4678             (!len || ((len & (blocksize - 1)) == 0)))
4679                 goto out;
4680
4681         ret = btrfs_delalloc_reserve_space(inode,
4682                         round_down(from, blocksize), blocksize);
4683         if (ret)
4684                 goto out;
4685
4686 again:
4687         page = find_or_create_page(mapping, index, mask);
4688         if (!page) {
4689                 btrfs_delalloc_release_space(inode,
4690                                 round_down(from, blocksize),
4691                                 blocksize);
4692                 ret = -ENOMEM;
4693                 goto out;
4694         }
4695
4696         block_start = round_down(from, blocksize);
4697         block_end = block_start + blocksize - 1;
4698
4699         if (!PageUptodate(page)) {
4700                 ret = btrfs_readpage(NULL, page);
4701                 lock_page(page);
4702                 if (page->mapping != mapping) {
4703                         unlock_page(page);
4704                         put_page(page);
4705                         goto again;
4706                 }
4707                 if (!PageUptodate(page)) {
4708                         ret = -EIO;
4709                         goto out_unlock;
4710                 }
4711         }
4712         wait_on_page_writeback(page);
4713
4714         lock_extent_bits(io_tree, block_start, block_end, &cached_state);
4715         set_page_extent_mapped(page);
4716
4717         ordered = btrfs_lookup_ordered_extent(inode, block_start);
4718         if (ordered) {
4719                 unlock_extent_cached(io_tree, block_start, block_end,
4720                                      &cached_state, GFP_NOFS);
4721                 unlock_page(page);
4722                 put_page(page);
4723                 btrfs_start_ordered_extent(inode, ordered, 1);
4724                 btrfs_put_ordered_extent(ordered);
4725                 goto again;
4726         }
4727
4728         clear_extent_bit(&BTRFS_I(inode)->io_tree, block_start, block_end,
4729                           EXTENT_DIRTY | EXTENT_DELALLOC |
4730                           EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
4731                           0, 0, &cached_state, GFP_NOFS);
4732
4733         ret = btrfs_set_extent_delalloc(inode, block_start, block_end,
4734                                         &cached_state);
4735         if (ret) {
4736                 unlock_extent_cached(io_tree, block_start, block_end,
4737                                      &cached_state, GFP_NOFS);
4738                 goto out_unlock;
4739         }
4740
4741         if (offset != blocksize) {
4742                 if (!len)
4743                         len = blocksize - offset;
4744                 kaddr = kmap(page);
4745                 if (front)
4746                         memset(kaddr + (block_start - page_offset(page)),
4747                                 0, offset);
4748                 else
4749                         memset(kaddr + (block_start - page_offset(page)) +  offset,
4750                                 0, len);
4751                 flush_dcache_page(page);
4752                 kunmap(page);
4753         }
4754         ClearPageChecked(page);
4755         set_page_dirty(page);
4756         unlock_extent_cached(io_tree, block_start, block_end, &cached_state,
4757                              GFP_NOFS);
4758
4759 out_unlock:
4760         if (ret)
4761                 btrfs_delalloc_release_space(inode, block_start,
4762                                              blocksize);
4763         unlock_page(page);
4764         put_page(page);
4765 out:
4766         return ret;
4767 }
4768
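/*
 * Insert an explicit hole extent covering [offset, offset + len).  With
 * the NO_HOLES incompat feature there is no item to insert, so we only
 * mark the inode as changed in this transaction so that an fsync will
 * log the hole.
 */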
4769 static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
4770                              u64 offset, u64 len)
4771 {
4772         struct btrfs_trans_handle *trans;
4773         int ret;
4774
4775         /*
4776          * Still need to make sure the inode looks like it's been updated so
4777          * that any holes get logged if we fsync.
4778          */
4779         if (btrfs_fs_incompat(root->fs_info, NO_HOLES)) {
4780                 BTRFS_I(inode)->last_trans = root->fs_info->generation;
4781                 BTRFS_I(inode)->last_sub_trans = root->log_transid;
4782                 BTRFS_I(inode)->last_log_commit = root->last_log_commit;
4783                 return 0;
4784         }
4785
4786         /*
4787          * 1 - for the one we're dropping
4788          * 1 - for the one we're adding
4789          * 1 - for updating the inode.
4790          */
4791         trans = btrfs_start_transaction(root, 3);
4792         if (IS_ERR(trans))
4793                 return PTR_ERR(trans);
4794
4795         ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1);
4796         if (ret) {
4797                 btrfs_abort_transaction(trans, root, ret);
4798                 btrfs_end_transaction(trans, root);
4799                 return ret;
4800         }
4801
4802         ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
4803                                        0, 0, len, 0, len, 0, 0, 0);
4804         if (ret)
4805                 btrfs_abort_transaction(trans, root, ret);
4806         else
4807                 btrfs_update_inode(trans, root, inode);
4808         btrfs_end_transaction(trans, root);
4809         return ret;
4810 }
4811
4812 /*
4813  * This function puts in dummy file extents for the area we're creating a hole
4814  * for.  So if we are truncating this file to a larger size we need to insert
4815  * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE
4816  * for the range between oldsize and size.
4817  */
4818 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
4819 {
4820         struct btrfs_root *root = BTRFS_I(inode)->root;
4821         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4822         struct extent_map *em = NULL;
4823         struct extent_state *cached_state = NULL;
4824         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4825         u64 hole_start = ALIGN(oldsize, root->sectorsize);
4826         u64 block_end = ALIGN(size, root->sectorsize);
4827         u64 last_byte;
4828         u64 cur_offset;
4829         u64 hole_size;
4830         int err = 0;
4831
4832         /*
4833          * If our size started in the middle of a block we need to zero out the
4834          * rest of the block before we expand the i_size, otherwise we could
4835          * expose stale data.
4836          */
4837         err = btrfs_truncate_block(inode, oldsize, 0, 0);
4838         if (err)
4839                 return err;
4840
4841         if (size <= hole_start)
4842                 return 0;
4843
4844         while (1) {
4845                 struct btrfs_ordered_extent *ordered;
4846
4847                 lock_extent_bits(io_tree, hole_start, block_end - 1,
4848                                  &cached_state);
4849                 ordered = btrfs_lookup_ordered_range(inode, hole_start,
4850                                                      block_end - hole_start);
4851                 if (!ordered)
4852                         break;
4853                 unlock_extent_cached(io_tree, hole_start, block_end - 1,
4854                                      &cached_state, GFP_NOFS);
4855                 btrfs_start_ordered_extent(inode, ordered, 1);
4856                 btrfs_put_ordered_extent(ordered);
4857         }
4858
4859         cur_offset = hole_start;
4860         while (1) {
4861                 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
4862                                 block_end - cur_offset, 0);
4863                 if (IS_ERR(em)) {
4864                         err = PTR_ERR(em);
4865                         em = NULL;
4866                         break;
4867                 }
4868                 last_byte = min(extent_map_end(em), block_end);
4869                 last_byte = ALIGN(last_byte, root->sectorsize);
4870                 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
4871                         struct extent_map *hole_em;
4872                         hole_size = last_byte - cur_offset;
4873
4874                         err = maybe_insert_hole(root, inode, cur_offset,
4875                                                 hole_size);
4876                         if (err)
4877                                 break;
4878                         btrfs_drop_extent_cache(inode, cur_offset,
4879                                                 cur_offset + hole_size - 1, 0);
4880                         hole_em = alloc_extent_map();
4881                         if (!hole_em) {
4882                                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4883                                         &BTRFS_I(inode)->runtime_flags);
4884                                 goto next;
4885                         }
4886                         hole_em->start = cur_offset;
4887                         hole_em->len = hole_size;
4888                         hole_em->orig_start = cur_offset;
4889
4890                         hole_em->block_start = EXTENT_MAP_HOLE;
4891                         hole_em->block_len = 0;
4892                         hole_em->orig_block_len = 0;
4893                         hole_em->ram_bytes = hole_size;
4894                         hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
4895                         hole_em->compress_type = BTRFS_COMPRESS_NONE;
4896                         hole_em->generation = root->fs_info->generation;
4897
4898                         while (1) {
4899                                 write_lock(&em_tree->lock);
4900                                 err = add_extent_mapping(em_tree, hole_em, 1);
4901                                 write_unlock(&em_tree->lock);
4902                                 if (err != -EEXIST)
4903                                         break;
4904                                 btrfs_drop_extent_cache(inode, cur_offset,
4905                                                         cur_offset +
4906                                                         hole_size - 1, 0);
4907                         }
4908                         free_extent_map(hole_em);
4909                 }
4910 next:
4911                 free_extent_map(em);
4912                 em = NULL;
4913                 cur_offset = last_byte;
4914                 if (cur_offset >= block_end)
4915                         break;
4916         }
4917         free_extent_map(em);
4918         unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
4919                              GFP_NOFS);
4920         return err;
4921 }
4922
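/*
 * Handle the ATTR_SIZE part of a setattr call.  Growing the file goes
 * through btrfs_cont_expand() to fill in hole extents, while shrinking
 * adds an orphan item before truncating so that a crash midway through
 * can be cleaned up on the next mount.
 */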
4923 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
4924 {
4925         struct btrfs_root *root = BTRFS_I(inode)->root;
4926         struct btrfs_trans_handle *trans;
4927         loff_t oldsize = i_size_read(inode);
4928         loff_t newsize = attr->ia_size;
4929         int mask = attr->ia_valid;
4930         int ret;
4931
4932         /*
4933          * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
4934          * special case where we need to update the times despite not having
4935          * these flags set.  For all other operations the VFS sets these flags
4936          * explicitly if it wants a timestamp update.
4937          */
4938         if (newsize != oldsize) {
4939                 inode_inc_iversion(inode);
4940                 if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
4941                         inode->i_ctime = inode->i_mtime =
4942                                 current_fs_time(inode->i_sb);
4943         }
4944
4945         if (newsize > oldsize) {
4946                 /*
4947                  * Don't do an expanding truncate while snapshotting is ongoing.
4948                  * This is to ensure the snapshot captures a fully consistent
4949                  * state of this file - if the snapshot captures this expanding
4950                  * truncation, it must capture all writes that happened before
4951                  * this truncation.
4952                  */
4953                 btrfs_wait_for_snapshot_creation(root);
4954                 ret = btrfs_cont_expand(inode, oldsize, newsize);
4955                 if (ret) {
4956                         btrfs_end_write_no_snapshoting(root);
4957                         return ret;
4958                 }
4959
4960                 trans = btrfs_start_transaction(root, 1);
4961                 if (IS_ERR(trans)) {
4962                         btrfs_end_write_no_snapshoting(root);
4963                         return PTR_ERR(trans);
4964                 }
4965
4966                 i_size_write(inode, newsize);
4967                 btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
4968                 pagecache_isize_extended(inode, oldsize, newsize);
4969                 ret = btrfs_update_inode(trans, root, inode);
4970                 btrfs_end_write_no_snapshoting(root);
4971                 btrfs_end_transaction(trans, root);
4972         } else {
4973
4974                 /*
4975                  * We're truncating a file that used to have good data down to
4976                  * zero. Make sure it gets into the ordered flush list so that
4977                  * any new writes get down to disk quickly.
4978                  */
4979                 if (newsize == 0)
4980                         set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
4981                                 &BTRFS_I(inode)->runtime_flags);
4982
4983                 /*
4984                  * 1 for the orphan item we're going to add
4985                  * 1 for the orphan item deletion.
4986                  */
4987                 trans = btrfs_start_transaction(root, 2);
4988                 if (IS_ERR(trans))
4989                         return PTR_ERR(trans);
4990
4991                 /*
4992                  * We need to do this in case we fail at _any_ point during the
4993                  * actual truncate.  Once we do the truncate_setsize we could
4994                  * invalidate pages which forces any outstanding ordered io to
4995                  * be instantly completed which will give us extents that need
4996                  * to be truncated.  If we fail to get an orphan inode down we
4997                  * could have left over extents that were never meant to live,
4998                  * so we need to guarantee from this point on that everything
4999                  * will be consistent.
5000                  */
5001                 ret = btrfs_orphan_add(trans, inode);
5002                 btrfs_end_transaction(trans, root);
5003                 if (ret)
5004                         return ret;
5005
5006                 /* we don't support swapfiles, so vmtruncate shouldn't fail */
5007                 truncate_setsize(inode, newsize);
5008
5009                 /* Disable nonlocked read DIO to avoid an endless truncate */
5010                 btrfs_inode_block_unlocked_dio(inode);
5011                 inode_dio_wait(inode);
5012                 btrfs_inode_resume_unlocked_dio(inode);
5013
5014                 ret = btrfs_truncate(inode);
5015                 if (ret && inode->i_nlink) {
5016                         int err;
5017
5018                         /*
5019                          * Failed to truncate.  disk_i_size is only adjusted down
5020                          * as we remove extents, so it should represent the true
5021                          * size of the inode; reset the in-memory size to it and
5022                          * delete our orphan entry.
5023                          */
5024                         trans = btrfs_join_transaction(root);
5025                         if (IS_ERR(trans)) {
5026                                 btrfs_orphan_del(NULL, inode);
5027                                 return ret;
5028                         }
5029                         i_size_write(inode, BTRFS_I(inode)->disk_i_size);
5030                         err = btrfs_orphan_del(trans, inode);
5031                         if (err)
5032                                 btrfs_abort_transaction(trans, root, err);
5033                         btrfs_end_transaction(trans, root);
5034                 }
5035         }
5036
5037         return ret;
5038 }
5039
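/*
 * .setattr inode operation: apply any size change via btrfs_setsize(),
 * copy the remaining attributes into the in-memory inode and mark it
 * dirty, then update the posix ACLs if the mode changed.
 */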
5040 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
5041 {
5042         struct inode *inode = d_inode(dentry);
5043         struct btrfs_root *root = BTRFS_I(inode)->root;
5044         int err;
5045
5046         if (btrfs_root_readonly(root))
5047                 return -EROFS;
5048
5049         err = inode_change_ok(inode, attr);
5050         if (err)
5051                 return err;
5052
5053         if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
5054                 err = btrfs_setsize(inode, attr);
5055                 if (err)
5056                         return err;
5057         }
5058
5059         if (attr->ia_valid) {
5060                 setattr_copy(inode, attr);
5061                 inode_inc_iversion(inode);
5062                 err = btrfs_dirty_inode(inode);
5063
5064                 if (!err && attr->ia_valid & ATTR_MODE)
5065                         err = posix_acl_chmod(inode, inode->i_mode);
5066         }
5067
5068         return err;
5069 }
5070
5071 /*
5072  * While truncating the inode pages during eviction, we get the VFS calling
5073  * btrfs_invalidatepage() against each page of the inode. This is slow because
5074  * the calls to btrfs_invalidatepage() result in a huge amount of calls to
5075  * lock_extent_bits() and clear_extent_bit(), which keep merging and splitting
5076  * extent_state structures over and over, wasting lots of time.
5077  *
5078  * Therefore if the inode is being evicted, let btrfs_invalidatepage() skip all
5079  * those expensive operations on a per page basis and do only the ordered io
5080  * finishing, while we release here the extent_map and extent_state structures,
5081  * without the excessive merging and splitting.
5082  */
5083 static void evict_inode_truncate_pages(struct inode *inode)
5084 {
5085         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5086         struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree;
5087         struct rb_node *node;
5088
5089         ASSERT(inode->i_state & I_FREEING);
5090         truncate_inode_pages_final(&inode->i_data);
5091
5092         write_lock(&map_tree->lock);
5093         while (!RB_EMPTY_ROOT(&map_tree->map)) {
5094                 struct extent_map *em;
5095
5096                 node = rb_first(&map_tree->map);
5097                 em = rb_entry(node, struct extent_map, rb_node);
5098                 clear_bit(EXTENT_FLAG_PINNED, &em->flags);
5099                 clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
5100                 remove_extent_mapping(map_tree, em);
5101                 free_extent_map(em);
5102                 if (need_resched()) {
5103                         write_unlock(&map_tree->lock);
5104                         cond_resched();
5105                         write_lock(&map_tree->lock);
5106                 }
5107         }
5108         write_unlock(&map_tree->lock);
5109
5110         /*
5111          * Keep looping until we have no more ranges in the io tree.
5112          * We can have ongoing bios started by readpages (called from readahead)
5113          * that have their endio callback (extent_io.c:end_bio_extent_readpage)
5114          * still in progress (they unlocked the pages in the bio but have not
5115          * yet unlocked the ranges in the io tree). This means some
5116          * ranges can still be locked and eviction started because before
5117          * submitting those bios, which are executed by a separate task (work
5118          * queue kthread), inode references (inode->i_count) were not taken
5119          * (which would be dropped in the end io callback of each bio).
5120          * Therefore here we effectively end up waiting for those bios and
5121          * anyone else holding locked ranges without having bumped the inode's
5122          * reference count - if we don't do it, when they access the inode's
5123          * io_tree to unlock a range it may be too late, leading to a
5124          * use-after-free issue.
5125          */
5126         spin_lock(&io_tree->lock);
5127         while (!RB_EMPTY_ROOT(&io_tree->state)) {
5128                 struct extent_state *state;
5129                 struct extent_state *cached_state = NULL;
5130                 u64 start;
5131                 u64 end;
5132
5133                 node = rb_first(&io_tree->state);
5134                 state = rb_entry(node, struct extent_state, rb_node);
5135                 start = state->start;
5136                 end = state->end;
5137                 spin_unlock(&io_tree->lock);
5138
5139                 lock_extent_bits(io_tree, start, end, &cached_state);
5140
5141                 /*
5142                  * If the range still has the DELALLOC flag, the extent never
5143                  * reached disk and its reserved space won't be freed by a
5144                  * delayed ref.  So we need to free its reserved space here.
5145                  * (Refer to comment in btrfs_invalidatepage, case 2)
5146                  *
5147                  * Note, end is the bytenr of last byte, so we need + 1 here.
5148                  */
5149                 if (state->state & EXTENT_DELALLOC)
5150                         btrfs_qgroup_free_data(inode, start, end - start + 1);
5151
5152                 clear_extent_bit(io_tree, start, end,
5153                                  EXTENT_LOCKED | EXTENT_DIRTY |
5154                                  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
5155                                  EXTENT_DEFRAG, 1, 1,
5156                                  &cached_state, GFP_NOFS);
5157
5158                 cond_resched();
5159                 spin_lock(&io_tree->lock);
5160         }
5161         spin_unlock(&io_tree->lock);
5162 }
5163
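/*
 * Called by the VFS when an inode is evicted.  Inodes that still have
 * links only get their pages and extent state torn down; for unlinked
 * inodes we truncate away every item, refilling a small block
 * reservation for each step and, as a last resort, stealing from the
 * global reserve.
 */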
5164 void btrfs_evict_inode(struct inode *inode)
5165 {
5166         struct btrfs_trans_handle *trans;
5167         struct btrfs_root *root = BTRFS_I(inode)->root;
5168         struct btrfs_block_rsv *rsv, *global_rsv;
5169         int steal_from_global = 0;
5170         u64 min_size;
5171         int ret;
5172
5173         trace_btrfs_inode_evict(inode);
5174
5175         if (!root) {
5176                 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
5177                 return;
5178         }
5179
5180         min_size = btrfs_calc_trunc_metadata_size(root, 1);
5181
5182         evict_inode_truncate_pages(inode);
5183
5184         if (inode->i_nlink &&
5185             ((btrfs_root_refs(&root->root_item) != 0 &&
5186               root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
5187              btrfs_is_free_space_inode(inode)))
5188                 goto no_delete;
5189
5190         if (is_bad_inode(inode)) {
5191                 btrfs_orphan_del(NULL, inode);
5192                 goto no_delete;
5193         }
5194         /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
5195         if (!special_file(inode->i_mode))
5196                 btrfs_wait_ordered_range(inode, 0, (u64)-1);
5197
5198         btrfs_free_io_failure_record(inode, 0, (u64)-1);
5199
5200         if (root->fs_info->log_root_recovering) {
5201                 BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
5202                                  &BTRFS_I(inode)->runtime_flags));
5203                 goto no_delete;
5204         }
5205
5206         if (inode->i_nlink > 0) {
5207                 BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
5208                        root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
5209                 goto no_delete;
5210         }
5211
5212         ret = btrfs_commit_inode_delayed_inode(inode);
5213         if (ret) {
5214                 btrfs_orphan_del(NULL, inode);
5215                 goto no_delete;
5216         }
5217
5218         rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
5219         if (!rsv) {
5220                 btrfs_orphan_del(NULL, inode);
5221                 goto no_delete;
5222         }
5223         rsv->size = min_size;
5224         rsv->failfast = 1;
5225         global_rsv = &root->fs_info->global_block_rsv;
5226
5227         btrfs_i_size_write(inode, 0);
5228
5229         /*
5230          * This is a bit simpler than btrfs_truncate since we've already
5231          * reserved our space for our orphan item in the unlink, so we just
5232          * need to reserve some slack space in case we add bytes and update
5233          * the inode item while doing the truncate.
5234          */
5235         while (1) {
5236                 ret = btrfs_block_rsv_refill(root, rsv, min_size,
5237                                              BTRFS_RESERVE_FLUSH_LIMIT);
5238
5239                 /*
5240                  * Try to steal from the global reserve: since we will
5241                  * likely not use this space anyway, we want to try as
5242                  * hard as possible to get this to work.
5243                  */
5244                 if (ret)
5245                         steal_from_global++;
5246                 else
5247                         steal_from_global = 0;
5248                 ret = 0;
5249
5250                 /*
5251                  * steal_from_global == 0: we reserved stuff, hooray!
5252                  * steal_from_global == 1: we didn't reserve stuff, boo!
5253                  * steal_from_global == 2: we've committed, still not a lot of
5254                  * room but maybe we'll have room in the global reserve this
5255                  * time.
5256                  * steal_from_global == 3: abandon all hope!
5257                  */
5258                 if (steal_from_global > 2) {
5259                         btrfs_warn(root->fs_info,
5260                                 "Could not get space for a delete, will truncate on mount %d",
5261                                 ret);
5262                         btrfs_orphan_del(NULL, inode);
5263                         btrfs_free_block_rsv(root, rsv);
5264                         goto no_delete;
5265                 }
5266
5267                 trans = btrfs_join_transaction(root);
5268                 if (IS_ERR(trans)) {
5269                         btrfs_orphan_del(NULL, inode);
5270                         btrfs_free_block_rsv(root, rsv);
5271                         goto no_delete;
5272                 }
5273
5274                 /*
5275                  * We can't just steal from the global reserve; we need to make
5276                  * sure there is room to do it, and if not we need to commit and
5277                  * try again.
5278                  */
5279                 if (steal_from_global) {
5280                         if (!btrfs_check_space_for_delayed_refs(trans, root))
5281                                 ret = btrfs_block_rsv_migrate(global_rsv, rsv,
5282                                                               min_size, 0);
5283                         else
5284                                 ret = -ENOSPC;
5285                 }
5286
5287                 /*
5288                  * We couldn't steal from the global reserve because too much
5289                  * pending work has built up, so commit the transaction and
5290                  * try again.
5291                  */
5292                 if (ret) {
5293                         ret = btrfs_commit_transaction(trans, root);
5294                         if (ret) {
5295                                 btrfs_orphan_del(NULL, inode);
5296                                 btrfs_free_block_rsv(root, rsv);
5297                                 goto no_delete;
5298                         }
5299                         continue;
5300                 } else {
5301                         steal_from_global = 0;
5302                 }
5303
5304                 trans->block_rsv = rsv;
5305
5306                 ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
5307                 if (ret != -ENOSPC && ret != -EAGAIN)
5308                         break;
5309
5310                 trans->block_rsv = &root->fs_info->trans_block_rsv;
5311                 btrfs_end_transaction(trans, root);
5312                 trans = NULL;
5313                 btrfs_btree_balance_dirty(root);
5314         }
5315
5316         btrfs_free_block_rsv(root, rsv);
5317
5318         /*
5319          * Errors here aren't a big deal; they just mean we leave orphan items
5320          * in the tree.  They will be cleaned up on the next mount.
5321          */
5322         if (ret == 0) {
5323                 trans->block_rsv = root->orphan_block_rsv;
5324                 btrfs_orphan_del(trans, inode);
5325         } else {
5326                 btrfs_orphan_del(NULL, inode);
5327         }
5328
5329         trans->block_rsv = &root->fs_info->trans_block_rsv;
5330         if (!(root == root->fs_info->tree_root ||
5331               root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
5332                 btrfs_return_ino(root, btrfs_ino(inode));
5333
5334         btrfs_end_transaction(trans, root);
5335         btrfs_btree_balance_dirty(root);
5336 no_delete:
5337         btrfs_remove_delayed_node(inode);
5338         clear_inode(inode);
5339 }
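
/*
 * Control-flow sketch of the eviction loop above (illustrative
 * pseudo-code only; refill(), steal(), commit(), give_up() and
 * truncate_step() are hypothetical helpers):
 *
 *	failures = 0;
 *	for (;;) {
 *		failures = refill(rsv) ? failures + 1 : 0;
 *		if (failures > 2)
 *			give_up();		- truncate on next mount
 *		if (failures && steal(global_rsv, rsv) < 0) {
 *			commit();		- free pinned space, retry
 *			continue;
 *		}
 *		ret = truncate_step();
 *		if (ret != -ENOSPC && ret != -EAGAIN)
 *			break;			- finished or fatal error
 *	}
 */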
5340
5341 /*
5342  * This returns, in the location pointer, the key found in the dir entry.
5343  * If no dir entries were found, location->objectid is 0.
5344  */
5345 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
5346                                struct btrfs_key *location)
5347 {
5348         const char *name = dentry->d_name.name;
5349         int namelen = dentry->d_name.len;
5350         struct btrfs_dir_item *di;
5351         struct btrfs_path *path;
5352         struct btrfs_root *root = BTRFS_I(dir)->root;
5353         int ret = 0;
5354
5355         path = btrfs_alloc_path();
5356         if (!path)
5357                 return -ENOMEM;
5358
5359         di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
5360                                     namelen, 0);
5361         if (IS_ERR(di))
5362                 ret = PTR_ERR(di);
5363
5364         if (IS_ERR_OR_NULL(di))
5365                 goto out_err;
5366
5367         btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
5368 out:
5369         btrfs_free_path(path);
5370         return ret;
5371 out_err:
5372         location->objectid = 0;
5373         goto out;
5374 }
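
/*
 * Example use of btrfs_inode_by_name() (illustrative only; this mirrors
 * what btrfs_lookup_dentry() does further below):
 *
 *	struct btrfs_key loc;
 *	int err = btrfs_inode_by_name(dir, dentry, &loc);
 *
 *	if (!err && loc.objectid != 0 && loc.type == BTRFS_INODE_ITEM_KEY)
 *		inode = btrfs_iget(dir->i_sb, &loc, BTRFS_I(dir)->root, NULL);
 */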
5375
5376 /*
5377  * when we hit a tree root in a directory, the btrfs part of the inode
5378  * needs to be changed to reflect the root directory of the tree root.  This
5379  * is kind of like crossing a mount point.
5380  */
5381 static int fixup_tree_root_location(struct btrfs_root *root,
5382                                     struct inode *dir,
5383                                     struct dentry *dentry,
5384                                     struct btrfs_key *location,
5385                                     struct btrfs_root **sub_root)
5386 {
5387         struct btrfs_path *path;
5388         struct btrfs_root *new_root;
5389         struct btrfs_root_ref *ref;
5390         struct extent_buffer *leaf;
5391         struct btrfs_key key;
5392         int ret;
5393         int err = 0;
5394
5395         path = btrfs_alloc_path();
5396         if (!path) {
5397                 err = -ENOMEM;
5398                 goto out;
5399         }
5400
5401         err = -ENOENT;
5402         key.objectid = BTRFS_I(dir)->root->root_key.objectid;
5403         key.type = BTRFS_ROOT_REF_KEY;
5404         key.offset = location->objectid;
5405
5406         ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, path,
5407                                 0, 0);
5408         if (ret) {
5409                 if (ret < 0)
5410                         err = ret;
5411                 goto out;
5412         }
5413
5414         leaf = path->nodes[0];
5415         ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
5416         if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
5417             btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
5418                 goto out;
5419
5420         ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
5421                                    (unsigned long)(ref + 1),
5422                                    dentry->d_name.len);
5423         if (ret)
5424                 goto out;
5425
5426         btrfs_release_path(path);
5427
5428         new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
5429         if (IS_ERR(new_root)) {
5430                 err = PTR_ERR(new_root);
5431                 goto out;
5432         }
5433
5434         *sub_root = new_root;
5435         location->objectid = btrfs_root_dirid(&new_root->root_item);
5436         location->type = BTRFS_INODE_ITEM_KEY;
5437         location->offset = 0;
5438         err = 0;
5439 out:
5440         btrfs_free_path(path);
5441         return err;
5442 }
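
/*
 * Worked example for the search above (illustrative): if 'dir' lives in
 * subvolume 5 and 'dentry' names a subvolume whose tree has objectid
 * 257, the root tree holds the key (5, BTRFS_ROOT_REF_KEY, 257); its
 * item records the dirid of 'dir' and the name, which is exactly what
 * the dirid and name checks above compare against.
 */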
5443
5444 static void inode_tree_add(struct inode *inode)
5445 {
5446         struct btrfs_root *root = BTRFS_I(inode)->root;
5447         struct btrfs_inode *entry;
5448         struct rb_node **p;
5449         struct rb_node *parent;
5450         struct rb_node *new = &BTRFS_I(inode)->rb_node;
5451         u64 ino = btrfs_ino(inode);
5452
5453         if (inode_unhashed(inode))
5454                 return;
5455         parent = NULL;
5456         spin_lock(&root->inode_lock);
5457         p = &root->inode_tree.rb_node;
5458         while (*p) {
5459                 parent = *p;
5460                 entry = rb_entry(parent, struct btrfs_inode, rb_node);
5461
5462                 if (ino < btrfs_ino(&entry->vfs_inode))
5463                         p = &parent->rb_left;
5464                 else if (ino > btrfs_ino(&entry->vfs_inode))
5465                         p = &parent->rb_right;
5466                 else {
5467                         WARN_ON(!(entry->vfs_inode.i_state &
5468                                   (I_WILL_FREE | I_FREEING)));
5469                         rb_replace_node(parent, new, &root->inode_tree);
5470                         RB_CLEAR_NODE(parent);
5471                         spin_unlock(&root->inode_lock);
5472                         return;
5473                 }
5474         }
5475         rb_link_node(new, parent, p);
5476         rb_insert_color(new, &root->inode_tree);
5477         spin_unlock(&root->inode_lock);
5478 }
5479
5480 static void inode_tree_del(struct inode *inode)
5481 {
5482         struct btrfs_root *root = BTRFS_I(inode)->root;
5483         int empty = 0;
5484
5485         spin_lock(&root->inode_lock);
5486         if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
5487                 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
5488                 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
5489                 empty = RB_EMPTY_ROOT(&root->inode_tree);
5490         }
5491         spin_unlock(&root->inode_lock);
5492
5493         if (empty && btrfs_root_refs(&root->root_item) == 0) {
5494                 synchronize_srcu(&root->fs_info->subvol_srcu);
5495                 spin_lock(&root->inode_lock);
5496                 empty = RB_EMPTY_ROOT(&root->inode_tree);
5497                 spin_unlock(&root->inode_lock);
5498                 if (empty)
5499                         btrfs_add_dead_root(root);
5500         }
5501 }
5502
5503 void btrfs_invalidate_inodes(struct btrfs_root *root)
5504 {
5505         struct rb_node *node;
5506         struct rb_node *prev;
5507         struct btrfs_inode *entry;
5508         struct inode *inode;
5509         u64 objectid = 0;
5510
5511         if (!test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
5512                 WARN_ON(btrfs_root_refs(&root->root_item) != 0);
5513
5514         spin_lock(&root->inode_lock);
5515 again:
5516         node = root->inode_tree.rb_node;
5517         prev = NULL;
5518         while (node) {
5519                 prev = node;
5520                 entry = rb_entry(node, struct btrfs_inode, rb_node);
5521
5522                 if (objectid < btrfs_ino(&entry->vfs_inode))
5523                         node = node->rb_left;
5524                 else if (objectid > btrfs_ino(&entry->vfs_inode))
5525                         node = node->rb_right;
5526                 else
5527                         break;
5528         }
5529         if (!node) {
5530                 while (prev) {
5531                         entry = rb_entry(prev, struct btrfs_inode, rb_node);
5532                         if (objectid <= btrfs_ino(&entry->vfs_inode)) {
5533                                 node = prev;
5534                                 break;
5535                         }
5536                         prev = rb_next(prev);
5537                 }
5538         }
5539         while (node) {
5540                 entry = rb_entry(node, struct btrfs_inode, rb_node);
5541                 objectid = btrfs_ino(&entry->vfs_inode) + 1;
5542                 inode = igrab(&entry->vfs_inode);
5543                 if (inode) {
5544                         spin_unlock(&root->inode_lock);
5545                         if (atomic_read(&inode->i_count) > 1)
5546                                 d_prune_aliases(inode);
5547                         /*
5548                          * btrfs_drop_inode will have it removed from
5549                          * the inode cache when its usage count
5550                          * hits zero.
5551                          */
5552                         iput(inode);
5553                         cond_resched();
5554                         spin_lock(&root->inode_lock);
5555                         goto again;
5556                 }
5557
5558                 if (cond_resched_lock(&root->inode_lock))
5559                         goto again;
5560
5561                 node = rb_next(node);
5562         }
5563         spin_unlock(&root->inode_lock);
5564 }
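
/*
 * Note on the walk above: it is restartable.  The next objectid is
 * remembered before inode_lock is dropped for igrab()/iput(), so the
 * walk can re-descend the rbtree and resume at the first inode with
 * ino >= objectid even if nodes were inserted or erased in between.
 */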
5565
5566 static int btrfs_init_locked_inode(struct inode *inode, void *p)
5567 {
5568         struct btrfs_iget_args *args = p;
5569         inode->i_ino = args->location->objectid;
5570         memcpy(&BTRFS_I(inode)->location, args->location,
5571                sizeof(*args->location));
5572         BTRFS_I(inode)->root = args->root;
5573         return 0;
5574 }
5575
5576 static int btrfs_find_actor(struct inode *inode, void *opaque)
5577 {
5578         struct btrfs_iget_args *args = opaque;
5579         return args->location->objectid == BTRFS_I(inode)->location.objectid &&
5580                 args->root == BTRFS_I(inode)->root;
5581 }
5582
5583 static struct inode *btrfs_iget_locked(struct super_block *s,
5584                                        struct btrfs_key *location,
5585                                        struct btrfs_root *root)
5586 {
5587         struct inode *inode;
5588         struct btrfs_iget_args args;
5589         unsigned long hashval = btrfs_inode_hash(location->objectid, root);
5590
5591         args.location = location;
5592         args.root = root;
5593
5594         inode = iget5_locked(s, hashval, btrfs_find_actor,
5595                              btrfs_init_locked_inode,
5596                              (void *)&args);
5597         return inode;
5598 }
5599
5600 /* Get an inode object given its location and corresponding root.
5601  * Returns in *new whether the inode was read from disk.
5602  */
5603 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
5604                          struct btrfs_root *root, int *new)
5605 {
5606         struct inode *inode;
5607
5608         inode = btrfs_iget_locked(s, location, root);
5609         if (!inode)
5610                 return ERR_PTR(-ENOMEM);
5611
5612         if (inode->i_state & I_NEW) {
5613                 btrfs_read_locked_inode(inode);
5614                 if (!is_bad_inode(inode)) {
5615                         inode_tree_add(inode);
5616                         unlock_new_inode(inode);
5617                         if (new)
5618                                 *new = 1;
5619                 } else {
5620                         unlock_new_inode(inode);
5621                         iput(inode);
5622                         inode = ERR_PTR(-ESTALE);
5623                 }
5624         }
5625
5626         return inode;
5627 }
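
/*
 * Example use of btrfs_iget() (illustrative): reading the root
 * directory of a subvolume, whose inode number is always
 * BTRFS_FIRST_FREE_OBJECTID.
 *
 *	struct btrfs_key key = {
 *		.objectid = BTRFS_FIRST_FREE_OBJECTID,
 *		.type = BTRFS_INODE_ITEM_KEY,
 *		.offset = 0,
 *	};
 *	struct inode *inode = btrfs_iget(sb, &key, root, NULL);
 */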
5628
5629 static struct inode *new_simple_dir(struct super_block *s,
5630                                     struct btrfs_key *key,
5631                                     struct btrfs_root *root)
5632 {
5633         struct inode *inode = new_inode(s);
5634
5635         if (!inode)
5636                 return ERR_PTR(-ENOMEM);
5637
5638         BTRFS_I(inode)->root = root;
5639         memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
5640         set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
5641
5642         inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
5643         inode->i_op = &btrfs_dir_ro_inode_operations;
5644         inode->i_fop = &simple_dir_operations;
5645         inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
5646         inode->i_mtime = current_fs_time(inode->i_sb);
5647         inode->i_atime = inode->i_mtime;
5648         inode->i_ctime = inode->i_mtime;
5649         BTRFS_I(inode)->i_otime = inode->i_mtime;
5650
5651         return inode;
5652 }
5653
5654 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
5655 {
5656         struct inode *inode;
5657         struct btrfs_root *root = BTRFS_I(dir)->root;
5658         struct btrfs_root *sub_root = root;
5659         struct btrfs_key location;
5660         int index;
5661         int ret = 0;
5662
5663         if (dentry->d_name.len > BTRFS_NAME_LEN)
5664                 return ERR_PTR(-ENAMETOOLONG);
5665
5666         ret = btrfs_inode_by_name(dir, dentry, &location);
5667         if (ret < 0)
5668                 return ERR_PTR(ret);
5669
5670         if (location.objectid == 0)
5671                 return ERR_PTR(-ENOENT);
5672
5673         if (location.type == BTRFS_INODE_ITEM_KEY) {
5674                 inode = btrfs_iget(dir->i_sb, &location, root, NULL);
5675                 return inode;
5676         }
5677
5678         BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
5679
5680         index = srcu_read_lock(&root->fs_info->subvol_srcu);
5681         ret = fixup_tree_root_location(root, dir, dentry,
5682                                        &location, &sub_root);
5683         if (ret < 0) {
5684                 if (ret != -ENOENT)
5685                         inode = ERR_PTR(ret);
5686                 else
5687                         inode = new_simple_dir(dir->i_sb, &location, sub_root);
5688         } else {
5689                 inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
5690         }
5691         srcu_read_unlock(&root->fs_info->subvol_srcu, index);
5692
5693         if (!IS_ERR(inode) && root != sub_root) {
5694                 down_read(&root->fs_info->cleanup_work_sem);
5695                 if (!(inode->i_sb->s_flags & MS_RDONLY))
5696                         ret = btrfs_orphan_cleanup(sub_root);
5697                 up_read(&root->fs_info->cleanup_work_sem);
5698                 if (ret) {
5699                         iput(inode);
5700                         inode = ERR_PTR(ret);
5701                 }
5702         }
5703
5704         return inode;
5705 }
5706
5707 static int btrfs_dentry_delete(const struct dentry *dentry)
5708 {
5709         struct btrfs_root *root;
5710         struct inode *inode = d_inode(dentry);
5711
5712         if (!inode && !IS_ROOT(dentry))
5713                 inode = d_inode(dentry->d_parent);
5714
5715         if (inode) {
5716                 root = BTRFS_I(inode)->root;
5717                 if (btrfs_root_refs(&root->root_item) == 0)
5718                         return 1;
5719
5720                 if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5721                         return 1;
5722         }
5723         return 0;
5724 }
5725
5726 static void btrfs_dentry_release(struct dentry *dentry)
5727 {
5728         kfree(dentry->d_fsdata);
5729 }
5730
5731 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
5732                                    unsigned int flags)
5733 {
5734         struct inode *inode;
5735
5736         inode = btrfs_lookup_dentry(dir, dentry);
5737         if (IS_ERR(inode)) {
5738                 if (PTR_ERR(inode) == -ENOENT)
5739                         inode = NULL;
5740                 else
5741                         return ERR_CAST(inode);
5742         }
5743
5744         return d_splice_alias(inode, dentry);
5745 }
5746
5747 unsigned char btrfs_filetype_table[] = {
5748         DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
5749 };
5750
5751 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
5752 {
5753         struct inode *inode = file_inode(file);
5754         struct btrfs_root *root = BTRFS_I(inode)->root;
5755         struct btrfs_item *item;
5756         struct btrfs_dir_item *di;
5757         struct btrfs_key key;
5758         struct btrfs_key found_key;
5759         struct btrfs_path *path;
5760         struct list_head ins_list;
5761         struct list_head del_list;
5762         int ret;
5763         struct extent_buffer *leaf;
5764         int slot;
5765         unsigned char d_type;
5766         int over = 0;
5767         u32 di_cur;
5768         u32 di_total;
5769         u32 di_len;
5770         int key_type = BTRFS_DIR_INDEX_KEY;
5771         char tmp_name[32];
5772         char *name_ptr;
5773         int name_len;
5774         int is_curr = 0;        /* ctx->pos points to the current index? */
5775         bool emitted;
5776         bool put = false;
5777
5778         /* FIXME, use a real flag for deciding about the key type */
5779         if (root->fs_info->tree_root == root)
5780                 key_type = BTRFS_DIR_ITEM_KEY;
5781
5782         if (!dir_emit_dots(file, ctx))
5783                 return 0;
5784
5785         path = btrfs_alloc_path();
5786         if (!path)
5787                 return -ENOMEM;
5788
5789         path->reada = READA_FORWARD;
5790
5791         if (key_type == BTRFS_DIR_INDEX_KEY) {
5792                 INIT_LIST_HEAD(&ins_list);
5793                 INIT_LIST_HEAD(&del_list);
5794                 put = btrfs_readdir_get_delayed_items(inode, &ins_list,
5795                                                       &del_list);
5796         }
5797
5798         key.type = key_type;
5799         key.offset = ctx->pos;
5800         key.objectid = btrfs_ino(inode);
5801
5802         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5803         if (ret < 0)
5804                 goto err;
5805
5806         emitted = false;
5807         while (1) {
5808                 leaf = path->nodes[0];
5809                 slot = path->slots[0];
5810                 if (slot >= btrfs_header_nritems(leaf)) {
5811                         ret = btrfs_next_leaf(root, path);
5812                         if (ret < 0)
5813                                 goto err;
5814                         else if (ret > 0)
5815                                 break;
5816                         continue;
5817                 }
5818
5819                 item = btrfs_item_nr(slot);
5820                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5821
5822                 if (found_key.objectid != key.objectid)
5823                         break;
5824                 if (found_key.type != key_type)
5825                         break;
5826                 if (found_key.offset < ctx->pos)
5827                         goto next;
5828                 if (key_type == BTRFS_DIR_INDEX_KEY &&
5829                     btrfs_should_delete_dir_index(&del_list,
5830                                                   found_key.offset))
5831                         goto next;
5832
5833                 ctx->pos = found_key.offset;
5834                 is_curr = 1;
5835
5836                 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
5837                 di_cur = 0;
5838                 di_total = btrfs_item_size(leaf, item);
5839
5840                 while (di_cur < di_total) {
5841                         struct btrfs_key location;
5842
5843                         if (verify_dir_item(root, leaf, di))
5844                                 break;
5845
5846                         name_len = btrfs_dir_name_len(leaf, di);
5847                         if (name_len <= sizeof(tmp_name)) {
5848                                 name_ptr = tmp_name;
5849                         } else {
5850                                 name_ptr = kmalloc(name_len, GFP_KERNEL);
5851                                 if (!name_ptr) {
5852                                         ret = -ENOMEM;
5853                                         goto err;
5854                                 }
5855                         }
5856                         read_extent_buffer(leaf, name_ptr,
5857                                            (unsigned long)(di + 1), name_len);
5858
5859                         d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
5860                         btrfs_dir_item_key_to_cpu(leaf, di, &location);
5861
5863                         /* is this a reference to our own snapshot? If so
5864                          * skip it.
5865                          *
5866                          * In contrast to old kernels, we insert the snapshot's
5867                          * dir item and dir index after it has been created, so
5868                          * we won't find a reference to our own snapshot. We
5869                          * still keep the following code for backward
5870                          * compatibility.
5871                          */
5872                         if (location.type == BTRFS_ROOT_ITEM_KEY &&
5873                             location.objectid == root->root_key.objectid) {
5874                                 over = 0;
5875                                 goto skip;
5876                         }
5877                         over = !dir_emit(ctx, name_ptr, name_len,
5878                                        location.objectid, d_type);
5879
5880 skip:
5881                         if (name_ptr != tmp_name)
5882                                 kfree(name_ptr);
5883
5884                         if (over)
5885                                 goto nopos;
5886                         emitted = true;
5887                         di_len = btrfs_dir_name_len(leaf, di) +
5888                                  btrfs_dir_data_len(leaf, di) + sizeof(*di);
5889                         di_cur += di_len;
5890                         di = (struct btrfs_dir_item *)((char *)di + di_len);
5891                 }
5892 next:
5893                 path->slots[0]++;
5894         }
5895
5896         if (key_type == BTRFS_DIR_INDEX_KEY) {
5897                 if (is_curr)
5898                         ctx->pos++;
5899                 ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list, &emitted);
5900                 if (ret)
5901                         goto nopos;
5902         }
5903
5904         /*
5905          * If we haven't emitted any dir entry, we must not touch ctx->pos as
5906          * it was set to the termination value in a previous call. We assume
5907          * that "." and ".." were emitted if we reach this point and set the
5908          * termination value as well for an empty directory.
5909          */
5910         if (ctx->pos > 2 && !emitted)
5911                 goto nopos;
5912
5913         /* Reached end of directory/root. Bump pos past the last item. */
5914         ctx->pos++;
5915
5916         /*
5917          * Stop new entries from being returned after we return the last
5918          * entry.
5919          *
5920          * New directory entries are assigned a strictly increasing
5921          * offset.  This means that new entries created during readdir
5922          * are *guaranteed* to be seen in the future by that readdir.
5923          * This has broken buggy programs which operate on names as
5924          * they're returned by readdir.  Until we re-use freed offsets
5925          * we have this hack to stop new entries from being returned
5926          * under the assumption that they'll never reach this huge
5927          * offset.
5928          *
5929          * This is being careful not to overflow 32bit loff_t unless the
5930          * last entry requires it because doing so has broken 32bit apps
5931          * in the past.
5932          */
5933         if (key_type == BTRFS_DIR_INDEX_KEY) {
5934                 if (ctx->pos >= INT_MAX)
5935                         ctx->pos = LLONG_MAX;
5936                 else
5937                         ctx->pos = INT_MAX;
5938         }
5939 nopos:
5940         ret = 0;
5941 err:
5942         if (put)
5943                 btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
5944         btrfs_free_path(path);
5945         return ret;
5946 }
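
/*
 * Worked example of the f_pos contract in btrfs_real_readdir() above
 * (illustrative, for the BTRFS_DIR_INDEX_KEY case): "." and ".."
 * occupy positions 0 and 1, real entries start at dir index 2, and
 * once the last entry has been returned ctx->pos jumps to INT_MAX (or
 * LLONG_MAX if it was already at INT_MAX), so entries created after
 * the readdir began are never returned to this cursor.
 */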
5947
5948 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
5949 {
5950         struct btrfs_root *root = BTRFS_I(inode)->root;
5951         struct btrfs_trans_handle *trans;
5952         int ret = 0;
5953         bool nolock = false;
5954
5955         if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
5956                 return 0;
5957
5958         if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(inode))
5959                 nolock = true;
5960
5961         if (wbc->sync_mode == WB_SYNC_ALL) {
5962                 if (nolock)
5963                         trans = btrfs_join_transaction_nolock(root);
5964                 else
5965                         trans = btrfs_join_transaction(root);
5966                 if (IS_ERR(trans))
5967                         return PTR_ERR(trans);
5968                 ret = btrfs_commit_transaction(trans, root);
5969         }
5970         return ret;
5971 }
5972
5973 /*
5974  * This is somewhat expensive, updating the tree every time the
5975  * inode changes.  But we are most likely to find the inode in cache.
5976  * FIXME: needs more benchmarking; there is no reason other than performance
5977  * to keep or drop this code.
5978  */
5979 static int btrfs_dirty_inode(struct inode *inode)
5980 {
5981         struct btrfs_root *root = BTRFS_I(inode)->root;
5982         struct btrfs_trans_handle *trans;
5983         int ret;
5984
5985         if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
5986                 return 0;
5987
5988         trans = btrfs_join_transaction(root);
5989         if (IS_ERR(trans))
5990                 return PTR_ERR(trans);
5991
5992         ret = btrfs_update_inode(trans, root, inode);
5993         if (ret == -ENOSPC) {
5994                 /* whoops, let's try again with the full transaction */
5995                 btrfs_end_transaction(trans, root);
5996                 trans = btrfs_start_transaction(root, 1);
5997                 if (IS_ERR(trans))
5998                         return PTR_ERR(trans);
5999
6000                 ret = btrfs_update_inode(trans, root, inode);
6001         }
6002         btrfs_end_transaction(trans, root);
6003         if (BTRFS_I(inode)->delayed_node)
6004                 btrfs_balance_delayed_items(root);
6005
6006         return ret;
6007 }
6008
6009 /*
6010  * This is a copy of file_update_time.  We need this so we can return an
6011  * error on ENOSPC when updating the inode for file writes and mmap writes.
6012  */
6013 static int btrfs_update_time(struct inode *inode, struct timespec *now,
6014                              int flags)
6015 {
6016         struct btrfs_root *root = BTRFS_I(inode)->root;
6017
6018         if (btrfs_root_readonly(root))
6019                 return -EROFS;
6020
6021         if (flags & S_VERSION)
6022                 inode_inc_iversion(inode);
6023         if (flags & S_CTIME)
6024                 inode->i_ctime = *now;
6025         if (flags & S_MTIME)
6026                 inode->i_mtime = *now;
6027         if (flags & S_ATIME)
6028                 inode->i_atime = *now;
6029         return btrfs_dirty_inode(inode);
6030 }
6031
6032 /*
6033  * find the highest existing sequence number in a directory
6034  * and then set the in-memory index_cnt variable to the first
6035  * free sequence number.
6036  */
6037 static int btrfs_set_inode_index_count(struct inode *inode)
6038 {
6039         struct btrfs_root *root = BTRFS_I(inode)->root;
6040         struct btrfs_key key, found_key;
6041         struct btrfs_path *path;
6042         struct extent_buffer *leaf;
6043         int ret;
6044
6045         key.objectid = btrfs_ino(inode);
6046         key.type = BTRFS_DIR_INDEX_KEY;
6047         key.offset = (u64)-1;
6048
6049         path = btrfs_alloc_path();
6050         if (!path)
6051                 return -ENOMEM;
6052
6053         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6054         if (ret < 0)
6055                 goto out;
6056         /* FIXME: we should be able to handle this */
6057         if (ret == 0)
6058                 goto out;
6059         ret = 0;
6060
6061         /*
6062          * MAGIC NUMBER EXPLANATION:
6063          * since we search a directory based on f_pos we have to start at 2:
6064          * '.' and '..' have f_pos of 0 and 1 respectively, so every real
6065          * entry starts at 2.
6066          */
6067         if (path->slots[0] == 0) {
6068                 BTRFS_I(inode)->index_cnt = 2;
6069                 goto out;
6070         }
6071
6072         path->slots[0]--;
6073
6074         leaf = path->nodes[0];
6075         btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6076
6077         if (found_key.objectid != btrfs_ino(inode) ||
6078             found_key.type != BTRFS_DIR_INDEX_KEY) {
6079                 BTRFS_I(inode)->index_cnt = 2;
6080                 goto out;
6081         }
6082
6083         BTRFS_I(inode)->index_cnt = found_key.offset + 1;
6084 out:
6085         btrfs_free_path(path);
6086         return ret;
6087 }
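
/*
 * Worked example (illustrative): if the highest existing key for this
 * directory is (ino, BTRFS_DIR_INDEX_KEY, 17), the function above sets
 * index_cnt to 18, so the next entry created in the directory receives
 * dir index 18.
 */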
6088
6089 /*
6090  * helper to find a free sequence number in a given directory.  The current
6091  * code is very simple; later versions will do smarter things in the btree.
6092  */
6093 int btrfs_set_inode_index(struct inode *dir, u64 *index)
6094 {
6095         int ret = 0;
6096
6097         if (BTRFS_I(dir)->index_cnt == (u64)-1) {
6098                 ret = btrfs_inode_delayed_dir_index_count(dir);
6099                 if (ret) {
6100                         ret = btrfs_set_inode_index_count(dir);
6101                         if (ret)
6102                                 return ret;
6103                 }
6104         }
6105
6106         *index = BTRFS_I(dir)->index_cnt;
6107         BTRFS_I(dir)->index_cnt++;
6108
6109         return ret;
6110 }
6111
6112 static int btrfs_insert_inode_locked(struct inode *inode)
6113 {
6114         struct btrfs_iget_args args;
6115         args.location = &BTRFS_I(inode)->location;
6116         args.root = BTRFS_I(inode)->root;
6117
6118         return insert_inode_locked4(inode,
6119                    btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
6120                    btrfs_find_actor, &args);
6121 }
6122
6123 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
6124                                      struct btrfs_root *root,
6125                                      struct inode *dir,
6126                                      const char *name, int name_len,
6127                                      u64 ref_objectid, u64 objectid,
6128                                      umode_t mode, u64 *index)
6129 {
6130         struct inode *inode;
6131         struct btrfs_inode_item *inode_item;
6132         struct btrfs_key *location;
6133         struct btrfs_path *path;
6134         struct btrfs_inode_ref *ref;
6135         struct btrfs_key key[2];
6136         u32 sizes[2];
6137         int nitems = name ? 2 : 1;
6138         unsigned long ptr;
6139         int ret;
6140
6141         path = btrfs_alloc_path();
6142         if (!path)
6143                 return ERR_PTR(-ENOMEM);
6144
6145         inode = new_inode(root->fs_info->sb);
6146         if (!inode) {
6147                 btrfs_free_path(path);
6148                 return ERR_PTR(-ENOMEM);
6149         }
6150
6151         /*
6152          * O_TMPFILE: set the link count to 0, so that from this point on
6153          * we fill in the inode item with the correct link count.
6154          */
6155         if (!name)
6156                 set_nlink(inode, 0);
6157
6158         /*
6159          * we have to initialize this early, so we can reclaim the inode
6160          * number if we fail afterwards in this function.
6161          */
6162         inode->i_ino = objectid;
6163
6164         if (dir && name) {
6165                 trace_btrfs_inode_request(dir);
6166
6167                 ret = btrfs_set_inode_index(dir, index);
6168                 if (ret) {
6169                         btrfs_free_path(path);
6170                         iput(inode);
6171                         return ERR_PTR(ret);
6172                 }
6173         } else if (dir) {
6174                 *index = 0;
6175         }
6176         /*
6177          * index_cnt is ignored for everything but a dir,
6178          * btrfs_set_inode_index_count has an explanation for the magic
6179          * number.
6180          */
6181         BTRFS_I(inode)->index_cnt = 2;
6182         BTRFS_I(inode)->dir_index = *index;
6183         BTRFS_I(inode)->root = root;
6184         BTRFS_I(inode)->generation = trans->transid;
6185         inode->i_generation = BTRFS_I(inode)->generation;
6186
6187         /*
6188          * We could have gotten an inode number from somebody who was fsynced
6189          * and then removed in this same transaction, so let's just set full
6190          * sync since it will be a full sync anyway and this will blow away the
6191          * old info in the log.
6192          */
6193         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
6194
6195         key[0].objectid = objectid;
6196         key[0].type = BTRFS_INODE_ITEM_KEY;
6197         key[0].offset = 0;
6198
6199         sizes[0] = sizeof(struct btrfs_inode_item);
6200
6201         if (name) {
6202                 /*
6203                  * Start new inodes with an inode_ref. This is slightly more
6204                  * efficient for small numbers of hard links since they will
6205                  * be packed into one item. Extended refs will kick in if we
6206                  * add more hard links than can fit in the ref item.
6207                  */
6208                 key[1].objectid = objectid;
6209                 key[1].type = BTRFS_INODE_REF_KEY;
6210                 key[1].offset = ref_objectid;
6211
6212                 sizes[1] = name_len + sizeof(*ref);
6213         }
6214
6215         location = &BTRFS_I(inode)->location;
6216         location->objectid = objectid;
6217         location->offset = 0;
6218         location->type = BTRFS_INODE_ITEM_KEY;
6219
6220         ret = btrfs_insert_inode_locked(inode);
6221         if (ret < 0)
6222                 goto fail;
6223
6224         path->leave_spinning = 1;
6225         ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems);
6226         if (ret != 0)
6227                 goto fail_unlock;
6228
6229         inode_init_owner(inode, dir, mode);
6230         inode_set_bytes(inode, 0);
6231
6232         inode->i_mtime = current_fs_time(inode->i_sb);
6233         inode->i_atime = inode->i_mtime;
6234         inode->i_ctime = inode->i_mtime;
6235         BTRFS_I(inode)->i_otime = inode->i_mtime;
6236
6237         inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
6238                                   struct btrfs_inode_item);
6239         memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item,
6240                              sizeof(*inode_item));
6241         fill_inode_item(trans, path->nodes[0], inode_item, inode);
6242
6243         if (name) {
6244                 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
6245                                      struct btrfs_inode_ref);
6246                 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
6247                 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
6248                 ptr = (unsigned long)(ref + 1);
6249                 write_extent_buffer(path->nodes[0], name, ptr, name_len);
6250         }
6251
6252         btrfs_mark_buffer_dirty(path->nodes[0]);
6253         btrfs_free_path(path);
6254
6255         btrfs_inherit_iflags(inode, dir);
6256
6257         if (S_ISREG(mode)) {
6258                 if (btrfs_test_opt(root->fs_info, NODATASUM))
6259                         BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6260                 if (btrfs_test_opt(root->fs_info, NODATACOW))
6261                         BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
6262                                 BTRFS_INODE_NODATASUM;
6263         }
6264
6265         inode_tree_add(inode);
6266
6267         trace_btrfs_inode_new(inode);
6268         btrfs_set_inode_last_trans(trans, inode);
6269
6270         btrfs_update_root_times(trans, root);
6271
6272         ret = btrfs_inode_inherit_props(trans, inode, dir);
6273         if (ret)
6274                 btrfs_err(root->fs_info,
6275                           "error inheriting props for ino %llu (root %llu): %d",
6276                           btrfs_ino(inode), root->root_key.objectid, ret);
6277
6278         return inode;
6279
6280 fail_unlock:
6281         unlock_new_inode(inode);
6282 fail:
6283         if (dir && name)
6284                 BTRFS_I(dir)->index_cnt--;
6285         btrfs_free_path(path);
6286         iput(inode);
6287         return ERR_PTR(ret);
6288 }
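
/*
 * Worked example for btrfs_new_inode() above (illustrative): creating
 * "foo" with objectid 258 in directory 256 inserts two adjacent items
 * in a single leaf operation:
 *
 *	key[0] = (258, BTRFS_INODE_ITEM_KEY, 0)    - the inode item
 *	key[1] = (258, BTRFS_INODE_REF_KEY, 256)   - the ref plus "foo"
 */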
6289
6290 static inline u8 btrfs_inode_type(struct inode *inode)
6291 {
6292         return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
6293 }
6294
6295 /*
6296  * utility function to add 'inode' into 'parent_inode' with
6297  * a given name and a given sequence number.
6298  * If 'add_backref' is true, also insert a backref from the
6299  * inode to the parent directory.
6300  */
6301 int btrfs_add_link(struct btrfs_trans_handle *trans,
6302                    struct inode *parent_inode, struct inode *inode,
6303                    const char *name, int name_len, int add_backref, u64 index)
6304 {
6305         int ret = 0;
6306         struct btrfs_key key;
6307         struct btrfs_root *root = BTRFS_I(parent_inode)->root;
6308         u64 ino = btrfs_ino(inode);
6309         u64 parent_ino = btrfs_ino(parent_inode);
6310
6311         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6312                 memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
6313         } else {
6314                 key.objectid = ino;
6315                 key.type = BTRFS_INODE_ITEM_KEY;
6316                 key.offset = 0;
6317         }
6318
6319         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6320                 ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
6321                                          key.objectid, root->root_key.objectid,
6322                                          parent_ino, index, name, name_len);
6323         } else if (add_backref) {
6324                 ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
6325                                              parent_ino, index);
6326         }
6327
6328         /* Nothing to clean up yet */
6329         if (ret)
6330                 return ret;
6331
6332         ret = btrfs_insert_dir_item(trans, root, name, name_len,
6333                                     parent_inode, &key,
6334                                     btrfs_inode_type(inode), index);
6335         if (ret == -EEXIST || ret == -EOVERFLOW)
6336                 goto fail_dir_item;
6337         else if (ret) {
6338                 btrfs_abort_transaction(trans, root, ret);
6339                 return ret;
6340         }
6341
6342         btrfs_i_size_write(parent_inode, parent_inode->i_size +
6343                            name_len * 2);
6344         inode_inc_iversion(parent_inode);
6345         parent_inode->i_mtime = parent_inode->i_ctime =
6346                 current_fs_time(parent_inode->i_sb);
6347         ret = btrfs_update_inode(trans, root, parent_inode);
6348         if (ret)
6349                 btrfs_abort_transaction(trans, root, ret);
6350         return ret;
6351
6352 fail_dir_item:
6353         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6354                 u64 local_index;
6355                 int err;
6356                 err = btrfs_del_root_ref(trans, root->fs_info->tree_root,
6357                                  key.objectid, root->root_key.objectid,
6358                                  parent_ino, &local_index, name, name_len);
6359
6360         } else if (add_backref) {
6361                 u64 local_index;
6362                 int err;
6363
6364                 err = btrfs_del_inode_ref(trans, root, name, name_len,
6365                                           ino, parent_ino, &local_index);
6366         }
6367         return ret;
6368 }
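
/*
 * Note on the i_size update in btrfs_add_link() above: every name in a
 * btrfs directory is stored twice, once as a DIR_ITEM keyed by the name
 * hash and once as a DIR_INDEX keyed by the sequence number, which is
 * why the directory's size grows by name_len * 2.
 */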
6369
6370 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
6371                             struct inode *dir, struct dentry *dentry,
6372                             struct inode *inode, int backref, u64 index)
6373 {
6374         int err = btrfs_add_link(trans, dir, inode,
6375                                  dentry->d_name.name, dentry->d_name.len,
6376                                  backref, index);
6377         if (err > 0)
6378                 err = -EEXIST;
6379         return err;
6380 }
6381
6382 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
6383                         umode_t mode, dev_t rdev)
6384 {
6385         struct btrfs_trans_handle *trans;
6386         struct btrfs_root *root = BTRFS_I(dir)->root;
6387         struct inode *inode = NULL;
6388         int err;
6389         int drop_inode = 0;
6390         u64 objectid;
6391         u64 index = 0;
6392
6393         /*
6394          * 2 for inode item and ref
6395          * 2 for dir items
6396          * 1 for xattr if selinux is on
6397          */
6398         trans = btrfs_start_transaction(root, 5);
6399         if (IS_ERR(trans))
6400                 return PTR_ERR(trans);
6401
6402         err = btrfs_find_free_ino(root, &objectid);
6403         if (err)
6404                 goto out_unlock;
6405
6406         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6407                                 dentry->d_name.len, btrfs_ino(dir), objectid,
6408                                 mode, &index);
6409         if (IS_ERR(inode)) {
6410                 err = PTR_ERR(inode);
6411                 goto out_unlock;
6412         }
6413
6414         /*
6415          * If the active LSM wants to access the inode during
6416          * d_instantiate it needs these. Smack checks to see
6417          * if the filesystem supports xattrs by looking at the
6418          * ops vector.
6419          */
6420         inode->i_op = &btrfs_special_inode_operations;
6421         init_special_inode(inode, inode->i_mode, rdev);
6422
6423         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6424         if (err)
6425                 goto out_unlock_inode;
6426
6427         err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
6428         if (err)
6429                 goto out_unlock_inode;
6430
6431         btrfs_update_inode(trans, root, inode);
6432         unlock_new_inode(inode);
6433         d_instantiate(dentry, inode);
6435
6436 out_unlock:
6437         btrfs_end_transaction(trans, root);
6438         btrfs_balance_delayed_items(root);
6439         btrfs_btree_balance_dirty(root);
6440         if (drop_inode) {
6441                 inode_dec_link_count(inode);
6442                 iput(inode);
6443         }
6444         return err;
6445
6446 out_unlock_inode:
6447         drop_inode = 1;
6448         unlock_new_inode(inode);
6449         goto out_unlock;
6450
6451 }
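
/*
 * Illustrative accounting for the btrfs_start_transaction(root, 5)
 * calls in the creation paths here: 1 inode item + 1 inode ref +
 * 1 dir item + 1 dir index + 1 optional security xattr = 5 items,
 * matching the "2 + 2 + 1" breakdown in the comments above.
 */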
6452
6453 static int btrfs_create(struct inode *dir, struct dentry *dentry,
6454                         umode_t mode, bool excl)
6455 {
6456         struct btrfs_trans_handle *trans;
6457         struct btrfs_root *root = BTRFS_I(dir)->root;
6458         struct inode *inode = NULL;
6459         int drop_inode_on_err = 0;
6460         int err;
6461         u64 objectid;
6462         u64 index = 0;
6463
6464         /*
6465          * 2 for inode item and ref
6466          * 2 for dir items
6467          * 1 for xattr if selinux is on
6468          */
6469         trans = btrfs_start_transaction(root, 5);
6470         if (IS_ERR(trans))
6471                 return PTR_ERR(trans);
6472
6473         err = btrfs_find_free_ino(root, &objectid);
6474         if (err)
6475                 goto out_unlock;
6476
6477         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6478                                 dentry->d_name.len, btrfs_ino(dir), objectid,
6479                                 mode, &index);
6480         if (IS_ERR(inode)) {
6481                 err = PTR_ERR(inode);
6482                 goto out_unlock;
6483         }
6484         drop_inode_on_err = 1;
6485         /*
6486          * If the active LSM wants to access the inode during
6487          * d_instantiate it needs these. Smack checks to see
6488          * if the filesystem supports xattrs by looking at the
6489          * ops vector.
6490          */
6491         inode->i_fop = &btrfs_file_operations;
6492         inode->i_op = &btrfs_file_inode_operations;
6493         inode->i_mapping->a_ops = &btrfs_aops;
6494
6495         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6496         if (err)
6497                 goto out_unlock_inode;
6498
6499         err = btrfs_update_inode(trans, root, inode);
6500         if (err)
6501                 goto out_unlock_inode;
6502
6503         err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
6504         if (err)
6505                 goto out_unlock_inode;
6506
6507         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
6508         unlock_new_inode(inode);
6509         d_instantiate(dentry, inode);
6510
6511 out_unlock:
6512         btrfs_end_transaction(trans, root);
6513         if (err && drop_inode_on_err) {
6514                 inode_dec_link_count(inode);
6515                 iput(inode);
6516         }
6517         btrfs_balance_delayed_items(root);
6518         btrfs_btree_balance_dirty(root);
6519         return err;
6520
6521 out_unlock_inode:
6522         unlock_new_inode(inode);
6523         goto out_unlock;
6524
6525 }
6526
6527 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
6528                       struct dentry *dentry)
6529 {
6530         struct btrfs_trans_handle *trans = NULL;
6531         struct btrfs_root *root = BTRFS_I(dir)->root;
6532         struct inode *inode = d_inode(old_dentry);
6533         u64 index;
6534         int err;
6535         int drop_inode = 0;
6536
6537         /* do not allow sys_link across subvolumes of the same device */
6538         if (root->objectid != BTRFS_I(inode)->root->objectid)
6539                 return -EXDEV;
6540
6541         if (inode->i_nlink >= BTRFS_LINK_MAX)
6542                 return -EMLINK;
6543
6544         err = btrfs_set_inode_index(dir, &index);
6545         if (err)
6546                 goto fail;
6547
6548         /*
6549          * 2 items for inode and inode ref
6550          * 2 items for dir items
6551          * 1 item for parent inode
6552          */
6553         trans = btrfs_start_transaction(root, 5);
6554         if (IS_ERR(trans)) {
6555                 err = PTR_ERR(trans);
6556                 trans = NULL;
6557                 goto fail;
6558         }
6559
6560         /* There are several dir indexes for this inode, clear the cache. */
6561         BTRFS_I(inode)->dir_index = 0ULL;
6562         inc_nlink(inode);
6563         inode_inc_iversion(inode);
6564         inode->i_ctime = current_fs_time(inode->i_sb);
6565         ihold(inode);
6566         set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
6567
6568         err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
6569
6570         if (err) {
6571                 drop_inode = 1;
6572         } else {
6573                 struct dentry *parent = dentry->d_parent;
6574                 err = btrfs_update_inode(trans, root, inode);
6575                 if (err)
6576                         goto fail;
6577                 if (inode->i_nlink == 1) {
6578                         /*
6579                          * If new hard link count is 1, it's a file created
6580                          * with open(2) O_TMPFILE flag.
6581                          */
6582                         err = btrfs_orphan_del(trans, inode);
6583                         if (err)
6584                                 goto fail;
6585                 }
6586                 d_instantiate(dentry, inode);
6587                 btrfs_log_new_name(trans, inode, NULL, parent);
6588         }
6589
6590         btrfs_balance_delayed_items(root);
6591 fail:
6592         if (trans)
6593                 btrfs_end_transaction(trans, root);
6594         if (drop_inode) {
6595                 inode_dec_link_count(inode);
6596                 iput(inode);
6597         }
6598         btrfs_btree_balance_dirty(root);
6599         return err;
6600 }
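
/*
 * Example of the i_nlink == 1 case above (illustrative): a file created
 * with open(2) and O_TMPFILE starts with i_nlink == 0 and an orphan
 * item; linking it in with linkat(2) brings i_nlink to 1 here, at which
 * point the orphan item must be removed or the file would be deleted on
 * the next mount.
 */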
6601
6602 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
6603 {
6604         struct inode *inode = NULL;
6605         struct btrfs_trans_handle *trans;
6606         struct btrfs_root *root = BTRFS_I(dir)->root;
6607         int err = 0;
6608         int drop_on_err = 0;
6609         u64 objectid = 0;
6610         u64 index = 0;
6611
6612         /*
6613          * 2 items for inode and ref
6614          * 2 items for dir items
6615          * 1 for xattr if selinux is on
6616          */
6617         trans = btrfs_start_transaction(root, 5);
6618         if (IS_ERR(trans))
6619                 return PTR_ERR(trans);
6620
6621         err = btrfs_find_free_ino(root, &objectid);
6622         if (err)
6623                 goto out_fail;
6624
6625         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6626                                 dentry->d_name.len, btrfs_ino(dir), objectid,
6627                                 S_IFDIR | mode, &index);
6628         if (IS_ERR(inode)) {
6629                 err = PTR_ERR(inode);
6630                 goto out_fail;
6631         }
6632
6633         drop_on_err = 1;
6634         /* these must be set before we unlock the inode */
6635         inode->i_op = &btrfs_dir_inode_operations;
6636         inode->i_fop = &btrfs_dir_file_operations;
6637
6638         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6639         if (err)
6640                 goto out_fail_inode;
6641
6642         btrfs_i_size_write(inode, 0);
6643         err = btrfs_update_inode(trans, root, inode);
6644         if (err)
6645                 goto out_fail_inode;
6646
6647         err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
6648                              dentry->d_name.len, 0, index);
6649         if (err)
6650                 goto out_fail_inode;
6651
6652         d_instantiate(dentry, inode);
6653         /*
6654          * mkdir is special.  We're unlocking after we call d_instantiate
6655          * to avoid a race with nfsd calling d_instantiate.
6656          */
6657         unlock_new_inode(inode);
6658         drop_on_err = 0;
6659
6660 out_fail:
6661         btrfs_end_transaction(trans, root);
6662         if (drop_on_err) {
6663                 inode_dec_link_count(inode);
6664                 iput(inode);
6665         }
6666         btrfs_balance_delayed_items(root);
6667         btrfs_btree_balance_dirty(root);
6668         return err;
6669
6670 out_fail_inode:
6671         unlock_new_inode(inode);
6672         goto out_fail;
6673 }
6674
6675 /* Find the next extent map after a given extent map; caller must hold locks */
6676 static struct extent_map *next_extent_map(struct extent_map *em)
6677 {
6678         struct rb_node *next;
6679
6680         next = rb_next(&em->rb_node);
6681         if (!next)
6682                 return NULL;
6683         return container_of(next, struct extent_map, rb_node);
6684 }
6685
6686 static struct extent_map *prev_extent_map(struct extent_map *em)
6687 {
6688         struct rb_node *prev;
6689
6690         prev = rb_prev(&em->rb_node);
6691         if (!prev)
6692                 return NULL;
6693         return container_of(prev, struct extent_map, rb_node);
6694 }
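
/*
 * Illustrative sketch only (not used by btrfs itself): walking every extent
 * map in a tree with the helpers above.  It assumes the caller already holds
 * em_tree->lock, as next_extent_map() requires.
 */
static u64 __maybe_unused count_extent_maps(struct extent_map_tree *em_tree)
{
        struct rb_node *first = rb_first(&em_tree->map);
        struct extent_map *em;
        u64 nr = 0;

        if (!first)
                return 0;
        for (em = container_of(first, struct extent_map, rb_node);
             em; em = next_extent_map(em))
                nr++;
        return nr;
}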
6695
6696 /* helper for btrfs_get_extent.  Given an existing extent in the tree
6697  * (the existing extent is the nearest extent to map_start) and an
6698  * extent that you want to insert, deal with overlap and insert the
6699  * best-fitting new extent into the tree.
6700  */
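/*
 * Worked example (illustrative numbers): existing = [0, 4k) is the nearest
 * extent below map_start = 8k, the next entry in the tree is [12k, 16k),
 * and em = [0, 16k).  Then start = 4k and end = 12k, so em is trimmed to
 * [4k, 12k), and for an uncompressed extent block_start advances by
 * start_diff = 4k before em is inserted.
 */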
6701 static int merge_extent_mapping(struct extent_map_tree *em_tree,
6702                                 struct extent_map *existing,
6703                                 struct extent_map *em,
6704                                 u64 map_start)
6705 {
6706         struct extent_map *prev;
6707         struct extent_map *next;
6708         u64 start;
6709         u64 end;
6710         u64 start_diff;
6711
6712         BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
6713
6714         if (existing->start > map_start) {
6715                 next = existing;
6716                 prev = prev_extent_map(next);
6717         } else {
6718                 prev = existing;
6719                 next = next_extent_map(prev);
6720         }
6721
6722         start = prev ? extent_map_end(prev) : em->start;
6723         start = max_t(u64, start, em->start);
6724         end = next ? next->start : extent_map_end(em);
6725         end = min_t(u64, end, extent_map_end(em));
6726         start_diff = start - em->start;
6727         em->start = start;
6728         em->len = end - start;
6729         if (em->block_start < EXTENT_MAP_LAST_BYTE &&
6730             !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
6731                 em->block_start += start_diff;
6732                 em->block_len -= start_diff;
6733         }
6734         return add_extent_mapping(em_tree, em, 0);
6735 }
6736
6737 static noinline int uncompress_inline(struct btrfs_path *path,
6738                                       struct page *page,
6739                                       size_t pg_offset, u64 extent_offset,
6740                                       struct btrfs_file_extent_item *item)
6741 {
6742         int ret;
6743         struct extent_buffer *leaf = path->nodes[0];
6744         char *tmp;
6745         size_t max_size;
6746         unsigned long inline_size;
6747         unsigned long ptr;
6748         int compress_type;
6749
6750         WARN_ON(pg_offset != 0);
6751         compress_type = btrfs_file_extent_compression(leaf, item);
6752         max_size = btrfs_file_extent_ram_bytes(leaf, item);
6753         inline_size = btrfs_file_extent_inline_item_len(leaf,
6754                                         btrfs_item_nr(path->slots[0]));
6755         tmp = kmalloc(inline_size, GFP_NOFS);
6756         if (!tmp)
6757                 return -ENOMEM;
6758         ptr = btrfs_file_extent_inline_start(item);
6759
6760         read_extent_buffer(leaf, tmp, ptr, inline_size);
6761
6762         max_size = min_t(unsigned long, PAGE_SIZE, max_size);
6763         ret = btrfs_decompress(compress_type, tmp, page,
6764                                extent_offset, inline_size, max_size);
6765         kfree(tmp);
6766         return ret;
6767 }
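
/*
 * Example (illustrative numbers): a 4000 byte file compressed to 500 bytes
 * and stored inline gives inline_size == 500 (the bytes held in the item)
 * and max_size == 4000 (btrfs_file_extent_ram_bytes), which
 * btrfs_decompress() above expands back into the page.
 */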
6768
6769 /*
6770  * a bit scary, this does extent mapping from logical file offset to the disk.
6771  * the ugly parts come from merging extents from the disk with the in-ram
6772  * representation.  This gets more complex because of the data=ordered code,
6773  * where the in-ram extents might be locked pending data=ordered completion.
6774  *
6775  * This also copies inline extents directly into the page.
6776  */
6777
6778 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
6779                                     size_t pg_offset, u64 start, u64 len,
6780                                     int create)
6781 {
6782         int ret;
6783         int err = 0;
6784         u64 extent_start = 0;
6785         u64 extent_end = 0;
6786         u64 objectid = btrfs_ino(inode);
6787         u32 found_type;
6788         struct btrfs_path *path = NULL;
6789         struct btrfs_root *root = BTRFS_I(inode)->root;
6790         struct btrfs_file_extent_item *item;
6791         struct extent_buffer *leaf;
6792         struct btrfs_key found_key;
6793         struct extent_map *em = NULL;
6794         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
6795         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
6796         struct btrfs_trans_handle *trans = NULL;
6797         const bool new_inline = !page || create;
6798
6799 again:
6800         read_lock(&em_tree->lock);
6801         em = lookup_extent_mapping(em_tree, start, len);
6802         if (em)
6803                 em->bdev = root->fs_info->fs_devices->latest_bdev;
6804         read_unlock(&em_tree->lock);
6805
6806         if (em) {
6807                 if (em->start > start || em->start + em->len <= start)
6808                         free_extent_map(em);
6809                 else if (em->block_start == EXTENT_MAP_INLINE && page)
6810                         free_extent_map(em);
6811                 else
6812                         goto out;
6813         }
6814         em = alloc_extent_map();
6815         if (!em) {
6816                 err = -ENOMEM;
6817                 goto out;
6818         }
6819         em->bdev = root->fs_info->fs_devices->latest_bdev;
6820         em->start = EXTENT_MAP_HOLE;
6821         em->orig_start = EXTENT_MAP_HOLE;
6822         em->len = (u64)-1;
6823         em->block_len = (u64)-1;
6824
6825         if (!path) {
6826                 path = btrfs_alloc_path();
6827                 if (!path) {
6828                         err = -ENOMEM;
6829                         goto out;
6830                 }
6831                 /*
6832                  * Chances are we'll be called again, so go ahead and do
6833                  * readahead
6834                  */
6835                 path->reada = READA_FORWARD;
6836         }
6837
6838         ret = btrfs_lookup_file_extent(trans, root, path,
6839                                        objectid, start, trans != NULL);
6840         if (ret < 0) {
6841                 err = ret;
6842                 goto out;
6843         }
6844
6845         if (ret != 0) {
6846                 if (path->slots[0] == 0)
6847                         goto not_found;
6848                 path->slots[0]--;
6849         }
6850
6851         leaf = path->nodes[0];
6852         item = btrfs_item_ptr(leaf, path->slots[0],
6853                               struct btrfs_file_extent_item);
6854         /* are we inside the extent that was found? */
6855         btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6856         found_type = found_key.type;
6857         if (found_key.objectid != objectid ||
6858             found_type != BTRFS_EXTENT_DATA_KEY) {
6859                 /*
6860                  * If we back up past the first extent we want to move forward
6861                  * and see if there is an extent in front of us, otherwise we'll
6862                  * say there is a hole for our whole search range which can
6863                  * cause problems.
6864                  */
6865                 extent_end = start;
6866                 goto next;
6867         }
6868
6869         found_type = btrfs_file_extent_type(leaf, item);
6870         extent_start = found_key.offset;
6871         if (found_type == BTRFS_FILE_EXTENT_REG ||
6872             found_type == BTRFS_FILE_EXTENT_PREALLOC) {
6873                 extent_end = extent_start +
6874                        btrfs_file_extent_num_bytes(leaf, item);
6875         } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
6876                 size_t size;
6877                 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
6878                 extent_end = ALIGN(extent_start + size, root->sectorsize);
6879         }
6880 next:
6881         if (start >= extent_end) {
6882                 path->slots[0]++;
6883                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
6884                         ret = btrfs_next_leaf(root, path);
6885                         if (ret < 0) {
6886                                 err = ret;
6887                                 goto out;
6888                         }
6889                         if (ret > 0)
6890                                 goto not_found;
6891                         leaf = path->nodes[0];
6892                 }
6893                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6894                 if (found_key.objectid != objectid ||
6895                     found_key.type != BTRFS_EXTENT_DATA_KEY)
6896                         goto not_found;
6897                 if (start + len <= found_key.offset)
6898                         goto not_found;
6899                 if (start > found_key.offset)
6900                         goto next;
6901                 em->start = start;
6902                 em->orig_start = start;
6903                 em->len = found_key.offset - start;
6904                 goto not_found_em;
6905         }
6906
6907         btrfs_extent_item_to_extent_map(inode, path, item, new_inline, em);
6908
6909         if (found_type == BTRFS_FILE_EXTENT_REG ||
6910             found_type == BTRFS_FILE_EXTENT_PREALLOC) {
6911                 goto insert;
6912         } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
6913                 unsigned long ptr;
6914                 char *map;
6915                 size_t size;
6916                 size_t extent_offset;
6917                 size_t copy_size;
6918
6919                 if (new_inline)
6920                         goto out;
6921
6922                 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
6923                 extent_offset = page_offset(page) + pg_offset - extent_start;
6924                 copy_size = min_t(u64, PAGE_SIZE - pg_offset,
6925                                   size - extent_offset);
6926                 em->start = extent_start + extent_offset;
6927                 em->len = ALIGN(copy_size, root->sectorsize);
6928                 em->orig_block_len = em->len;
6929                 em->orig_start = em->start;
6930                 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
6931                 if (create == 0 && !PageUptodate(page)) {
6932                         if (btrfs_file_extent_compression(leaf, item) !=
6933                             BTRFS_COMPRESS_NONE) {
6934                                 ret = uncompress_inline(path, page, pg_offset,
6935                                                         extent_offset, item);
6936                                 if (ret) {
6937                                         err = ret;
6938                                         goto out;
6939                                 }
6940                         } else {
6941                                 map = kmap(page);
6942                                 read_extent_buffer(leaf, map + pg_offset, ptr,
6943                                                    copy_size);
6944                                 if (pg_offset + copy_size < PAGE_SIZE) {
6945                                         memset(map + pg_offset + copy_size, 0,
6946                                                PAGE_SIZE - pg_offset -
6947                                                copy_size);
6948                                 }
6949                                 kunmap(page);
6950                         }
6951                         flush_dcache_page(page);
6952                 } else if (create && PageUptodate(page)) {
6953                         BUG();
6954                         if (!trans) {
6955                                 kunmap(page);
6956                                 free_extent_map(em);
6957                                 em = NULL;
6958
6959                                 btrfs_release_path(path);
6960                                 trans = btrfs_join_transaction(root);
6961
6962                                 if (IS_ERR(trans))
6963                                         return ERR_CAST(trans);
6964                                 goto again;
6965                         }
6966                         map = kmap(page);
6967                         write_extent_buffer(leaf, map + pg_offset, ptr,
6968                                             copy_size);
6969                         kunmap(page);
6970                         btrfs_mark_buffer_dirty(leaf);
6971                 }
6972                 set_extent_uptodate(io_tree, em->start,
6973                                     extent_map_end(em) - 1, NULL, GFP_NOFS);
6974                 goto insert;
6975         }
6976 not_found:
6977         em->start = start;
6978         em->orig_start = start;
6979         em->len = len;
6980 not_found_em:
6981         em->block_start = EXTENT_MAP_HOLE;
6982         set_bit(EXTENT_FLAG_VACANCY, &em->flags);
6983 insert:
6984         btrfs_release_path(path);
6985         if (em->start > start || extent_map_end(em) <= start) {
6986                 btrfs_err(root->fs_info, "bad extent! em: [%llu %llu] passed [%llu %llu]",
6987                         em->start, em->len, start, len);
6988                 err = -EIO;
6989                 goto out;
6990         }
6991
6992         err = 0;
6993         write_lock(&em_tree->lock);
6994         ret = add_extent_mapping(em_tree, em, 0);
6995         /* it is possible that someone inserted the extent into the tree
6996          * while we had the lock dropped.  It is also possible that
6997          * an overlapping map exists in the tree
6998          */
6999         if (ret == -EEXIST) {
7000                 struct extent_map *existing;
7001
7002                 ret = 0;
7003
7004                 existing = search_extent_mapping(em_tree, start, len);
7005                 /*
7006                  * existing will always be non-NULL, since there must be
7007                  * an extent causing the -EEXIST.
7008                  */
7009                 if (existing->start == em->start &&
7010                     extent_map_end(existing) == extent_map_end(em) &&
7011                     em->block_start == existing->block_start) {
7012                         /*
7013                          * these two extents are the same, it happens
7014                          * with inlines especially
7015                          */
7016                         free_extent_map(em);
7017                         em = existing;
7018                         err = 0;
7019
7020                 } else if (start >= extent_map_end(existing) ||
7021                     start <= existing->start) {
7022                         /*
7023                          * The existing extent map is the one nearest to
7024                          * the [start, start + len) range which overlaps
7025                          */
7026                         err = merge_extent_mapping(em_tree, existing,
7027                                                    em, start);
7028                         free_extent_map(existing);
7029                         if (err) {
7030                                 free_extent_map(em);
7031                                 em = NULL;
7032                         }
7033                 } else {
7034                         free_extent_map(em);
7035                         em = existing;
7036                         err = 0;
7037                 }
7038         }
7039         write_unlock(&em_tree->lock);
7040 out:
7041
7042         trace_btrfs_get_extent(root, em);
7043
7044         btrfs_free_path(path);
7045         if (trans) {
7046                 ret = btrfs_end_transaction(trans, root);
7047                 if (!err)
7048                         err = ret;
7049         }
7050         if (err) {
7051                 free_extent_map(em);
7052                 return ERR_PTR(err);
7053         }
7054         BUG_ON(!em); /* Error is always set */
7055         return em;
7056 }
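
/*
 * Minimal usage sketch (illustrative only, not called by btrfs): a pure
 * read-side lookup of the mapping at @start.  create == 0 means nothing is
 * allocated on disk, and the caller owns a reference on the returned map.
 */
static int __maybe_unused example_range_is_hole(struct inode *inode, u64 start)
{
        struct extent_map *em;
        int ret;

        em = btrfs_get_extent(inode, NULL, 0, start, 1, 0);
        if (IS_ERR(em))
                return PTR_ERR(em);
        ret = em->block_start == EXTENT_MAP_HOLE;
        free_extent_map(em);    /* drop the reference btrfs_get_extent took */
        return ret;
}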
7057
7058 struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
7059                                            size_t pg_offset, u64 start, u64 len,
7060                                            int create)
7061 {
7062         struct extent_map *em;
7063         struct extent_map *hole_em = NULL;
7064         u64 range_start = start;
7065         u64 end;
7066         u64 found;
7067         u64 found_end;
7068         int err = 0;
7069
7070         em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
7071         if (IS_ERR(em))
7072                 return em;
7073         if (em) {
7074                 /*
7075                  * if our em maps to
7076                  * -  a hole or
7077                  * -  a pre-alloc extent,
7078                  * there might actually be delalloc bytes behind it.
7079                  */
7080                 if (em->block_start != EXTENT_MAP_HOLE &&
7081                     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7082                         return em;
7083                 else
7084                         hole_em = em;
7085         }
7086
7087         /* check to see if we've wrapped (len == -1 or similar) */
7088         end = start + len;
7089         if (end < start)
7090                 end = (u64)-1;
7091         else
7092                 end -= 1;
7093
7094         em = NULL;
7095
7096         /* ok, we didn't find anything, let's look for delalloc */
7097         found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
7098                                  end, len, EXTENT_DELALLOC, 1);
7099         found_end = range_start + found;
7100         if (found_end < range_start)
7101                 found_end = (u64)-1;
7102
7103         /*
7104          * we didn't find anything useful, return
7105          * the original results from get_extent()
7106          */
7107         if (range_start > end || found_end <= start) {
7108                 em = hole_em;
7109                 hole_em = NULL;
7110                 goto out;
7111         }
7112
7113         /* adjust the range_start to make sure it doesn't
7114          * go backwards from the start they passed in
7115          */
7116         range_start = max(start, range_start);
7117         found = found_end - range_start;
7118
7119         if (found > 0) {
7120                 u64 hole_start = start;
7121                 u64 hole_len = len;
7122
7123                 em = alloc_extent_map();
7124                 if (!em) {
7125                         err = -ENOMEM;
7126                         goto out;
7127                 }
7128                 /*
7129                  * when btrfs_get_extent can't find anything it
7130                  * returns one huge hole
7131                  *
7132                  * make sure what it found really fits our range, and
7133                  * adjust to make sure it is based on the start from
7134                  * the caller
7135                  */
7136                 if (hole_em) {
7137                         u64 calc_end = extent_map_end(hole_em);
7138
7139                         if (calc_end <= start || (hole_em->start > end)) {
7140                                 free_extent_map(hole_em);
7141                                 hole_em = NULL;
7142                         } else {
7143                                 hole_start = max(hole_em->start, start);
7144                                 hole_len = calc_end - hole_start;
7145                         }
7146                 }
7147                 em->bdev = NULL;
7148                 if (hole_em && range_start > hole_start) {
7149                         /* our hole starts before our delalloc, so we
7150                          * have to return just the parts of the hole
7151                          * that go until the delalloc starts
7152                          */
7153                         em->len = min(hole_len,
7154                                       range_start - hole_start);
7155                         em->start = hole_start;
7156                         em->orig_start = hole_start;
7157                         /*
7158                          * don't adjust block start at all,
7159                          * it is fixed at EXTENT_MAP_HOLE
7160                          */
7161                         em->block_start = hole_em->block_start;
7162                         em->block_len = hole_len;
7163                         if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
7164                                 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
7165                 } else {
7166                         em->start = range_start;
7167                         em->len = found;
7168                         em->orig_start = range_start;
7169                         em->block_start = EXTENT_MAP_DELALLOC;
7170                         em->block_len = found;
7171                 }
7172         } else if (hole_em) {
7173                 return hole_em;
7174         }
7175 out:
7176
7177         free_extent_map(hole_em);
7178         if (err) {
7179                 free_extent_map(em);
7180                 return ERR_PTR(err);
7181         }
7182         return em;
7183 }
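
/*
 * Example: for a range that btrfs_get_extent() reports as one huge hole
 * but whose pages at [4k, 8k) are still dirty in memory, the delalloc
 * scan above yields a map with block_start == EXTENT_MAP_DELALLOC
 * covering [4k, 8k), so fiemap can report the buffered data instead of
 * a plain hole.
 */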
7184
7185 static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
7186                                                   const u64 start,
7187                                                   const u64 len,
7188                                                   const u64 orig_start,
7189                                                   const u64 block_start,
7190                                                   const u64 block_len,
7191                                                   const u64 orig_block_len,
7192                                                   const u64 ram_bytes,
7193                                                   const int type)
7194 {
7195         struct extent_map *em = NULL;
7196         int ret;
7197
7198         down_read(&BTRFS_I(inode)->dio_sem);
7199         if (type != BTRFS_ORDERED_NOCOW) {
7200                 em = create_pinned_em(inode, start, len, orig_start,
7201                                       block_start, block_len, orig_block_len,
7202                                       ram_bytes, type);
7203                 if (IS_ERR(em))
7204                         goto out;
7205         }
7206         ret = btrfs_add_ordered_extent_dio(inode, start, block_start,
7207                                            len, block_len, type);
7208         if (ret) {
7209                 if (em) {
7210                         free_extent_map(em);
7211                         btrfs_drop_extent_cache(inode, start,
7212                                                 start + len - 1, 0);
7213                 }
7214                 em = ERR_PTR(ret);
7215         }
7216  out:
7217         up_read(&BTRFS_I(inode)->dio_sem);
7218
7219         return em;
7220 }
7221
7222 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
7223                                                   u64 start, u64 len)
7224 {
7225         struct btrfs_root *root = BTRFS_I(inode)->root;
7226         struct extent_map *em;
7227         struct btrfs_key ins;
7228         u64 alloc_hint;
7229         int ret;
7230
7231         alloc_hint = get_extent_allocation_hint(inode, start, len);
7232         ret = btrfs_reserve_extent(root, len, root->sectorsize, 0,
7233                                    alloc_hint, &ins, 1, 1);
7234         if (ret)
7235                 return ERR_PTR(ret);
7236
7237         em = btrfs_create_dio_extent(inode, start, ins.offset, start,
7238                                      ins.objectid, ins.offset, ins.offset,
7239                                      ins.offset, 0);
7240         btrfs_dec_block_group_reservations(root->fs_info, ins.objectid);
7241         if (IS_ERR(em))
7242                 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
7243
7244         return em;
7245 }
7246
7247 /*
7248  * returns 1 when the nocow is safe, < 0 on error, 0 if the
7249  * block must be cow'd
7250  */
7251 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
7252                               u64 *orig_start, u64 *orig_block_len,
7253                               u64 *ram_bytes)
7254 {
7255         struct btrfs_trans_handle *trans;
7256         struct btrfs_path *path;
7257         int ret;
7258         struct extent_buffer *leaf;
7259         struct btrfs_root *root = BTRFS_I(inode)->root;
7260         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7261         struct btrfs_file_extent_item *fi;
7262         struct btrfs_key key;
7263         u64 disk_bytenr;
7264         u64 backref_offset;
7265         u64 extent_end;
7266         u64 num_bytes;
7267         int slot;
7268         int found_type;
7269         bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW);
7270
7271         path = btrfs_alloc_path();
7272         if (!path)
7273                 return -ENOMEM;
7274
7275         ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
7276                                        offset, 0);
7277         if (ret < 0)
7278                 goto out;
7279
7280         slot = path->slots[0];
7281         if (ret == 1) {
7282                 if (slot == 0) {
7283                         /* can't find the item, must cow */
7284                         ret = 0;
7285                         goto out;
7286                 }
7287                 slot--;
7288         }
7289         ret = 0;
7290         leaf = path->nodes[0];
7291         btrfs_item_key_to_cpu(leaf, &key, slot);
7292         if (key.objectid != btrfs_ino(inode) ||
7293             key.type != BTRFS_EXTENT_DATA_KEY) {
7294                 /* not our file or wrong item type, must cow */
7295                 goto out;
7296         }
7297
7298         if (key.offset > offset) {
7299                 /* Wrong offset, must cow */
7300                 goto out;
7301         }
7302
7303         fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
7304         found_type = btrfs_file_extent_type(leaf, fi);
7305         if (found_type != BTRFS_FILE_EXTENT_REG &&
7306             found_type != BTRFS_FILE_EXTENT_PREALLOC) {
7307                 /* not a regular extent, must cow */
7308                 goto out;
7309         }
7310
7311         if (!nocow && found_type == BTRFS_FILE_EXTENT_REG)
7312                 goto out;
7313
7314         extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
7315         if (extent_end <= offset)
7316                 goto out;
7317
7318         disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
7319         if (disk_bytenr == 0)
7320                 goto out;
7321
7322         if (btrfs_file_extent_compression(leaf, fi) ||
7323             btrfs_file_extent_encryption(leaf, fi) ||
7324             btrfs_file_extent_other_encoding(leaf, fi))
7325                 goto out;
7326
7327         backref_offset = btrfs_file_extent_offset(leaf, fi);
7328
7329         if (orig_start) {
7330                 *orig_start = key.offset - backref_offset;
7331                 *orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
7332                 *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
7333         }
7334
7335         if (btrfs_extent_readonly(root, disk_bytenr))
7336                 goto out;
7337
7338         num_bytes = min(offset + *len, extent_end) - offset;
7339         if (!nocow && found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7340                 u64 range_end;
7341
7342                 range_end = round_up(offset + num_bytes, root->sectorsize) - 1;
7343                 ret = test_range_bit(io_tree, offset, range_end,
7344                                      EXTENT_DELALLOC, 0, NULL);
7345                 if (ret) {
7346                         ret = -EAGAIN;
7347                         goto out;
7348                 }
7349         }
7350
7351         btrfs_release_path(path);
7352
7353         /*
7354          * look for other files referencing this extent, if we
7355          * find any we must cow
7356          */
7357         trans = btrfs_join_transaction(root);
7358         if (IS_ERR(trans)) {
7359                 ret = 0;
7360                 goto out;
7361         }
7362
7363         ret = btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
7364                                     key.offset - backref_offset, disk_bytenr);
7365         btrfs_end_transaction(trans, root);
7366         if (ret) {
7367                 ret = 0;
7368                 goto out;
7369         }
7370
7371         /*
7372          * adjust disk_bytenr and num_bytes to cover just the bytes
7373          * in this extent we are about to write.  If there
7374          * are any csums in that range we have to cow in order
7375          * to keep the csums correct
7376          */
7377         disk_bytenr += backref_offset;
7378         disk_bytenr += offset - key.offset;
7379         if (csum_exist_in_range(root, disk_bytenr, num_bytes))
7380                 goto out;
7381         /*
7382          * all of the above have passed, it is safe to overwrite this extent
7383          * without cow
7384          */
7385         *len = num_bytes;
7386         ret = 1;
7387 out:
7388         btrfs_free_path(path);
7389         return ret;
7390 }
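
/*
 * Illustrative caller pattern for can_nocow_extent(); the real DIO caller
 * is btrfs_get_blocks_direct() below.  ret == 1 means [offset, offset+len)
 * may be overwritten in place, ret == 0 means it must be COWed, and
 * ret < 0 is an error.
 */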
7391
7392 bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
7393 {
7394         struct radix_tree_root *root = &inode->i_mapping->page_tree;
7395         int found = false;
7396         void **pagep = NULL;
7397         struct page *page = NULL;
7398         int start_idx;
7399         int end_idx;
7400
7401         start_idx = start >> PAGE_SHIFT;
7402
7403         /*
7404          * end is the last byte in the last page.  end == start is legal
7405          */
7406         end_idx = end >> PAGE_SHIFT;
7407
7408         rcu_read_lock();
7409
7410         /* Most of the code in this while loop is lifted from
7411          * find_get_page.  It's been modified to begin searching from a
7412          * page and return just the first page found in that range.  If the
7413          * found idx is less than or equal to the end idx then we know that
7414          * a page exists.  If no pages are found or if those pages are
7415          * outside of the range then we're fine (yay!) */
7416         while (page == NULL &&
7417                radix_tree_gang_lookup_slot(root, &pagep, NULL, start_idx, 1)) {
7418                 page = radix_tree_deref_slot(pagep);
7419                 if (unlikely(!page))
7420                         break;
7421
7422                 if (radix_tree_exception(page)) {
7423                         if (radix_tree_deref_retry(page)) {
7424                                 page = NULL;
7425                                 continue;
7426                         }
7427                         /*
7428                          * Otherwise, shmem/tmpfs must be storing a swap entry
7429                          * here as an exceptional entry: so return it without
7430                          * attempting to raise page count.
7431                          */
7432                         page = NULL;
7433                         break; /* TODO: Is this relevant for this use case? */
7434                 }
7435
7436                 if (!page_cache_get_speculative(page)) {
7437                         page = NULL;
7438                         continue;
7439                 }
7440
7441                 /*
7442                  * Has the page moved?
7443                  * This is part of the lockless pagecache protocol. See
7444                  * include/linux/pagemap.h for details.
7445                  */
7446                 if (unlikely(page != *pagep)) {
7447                         put_page(page);
7448                         page = NULL;
7449                 }
7450         }
7451
7452         if (page) {
7453                 if (page->index <= end_idx)
7454                         found = true;
7455                 put_page(page);
7456         }
7457
7458         rcu_read_unlock();
7459         return found;
7460 }
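
/*
 * Example: with 4k pages, start == 0 and end == 4095 both map to page
 * index 0, so only that one page is checked; end == start likewise
 * degenerates to a single-page lookup.
 */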
7461
7462 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
7463                               struct extent_state **cached_state, int writing)
7464 {
7465         struct btrfs_ordered_extent *ordered;
7466         int ret = 0;
7467
7468         while (1) {
7469                 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7470                                  cached_state);
7471                 /*
7472                  * We're concerned with the entire range that we're going to be
7473                  * doing DIO to, so we need to make sure there's no ordered
7474                  * extents in this range.
7475                  */
7476                 ordered = btrfs_lookup_ordered_range(inode, lockstart,
7477                                                      lockend - lockstart + 1);
7478
7479                 /*
7480                  * We need to make sure there are no buffered pages in this
7481                  * range either, we could have raced between the invalidate in
7482                  * generic_file_direct_write and locking the extent.  The
7483                  * invalidate needs to happen so that reads after a write do not
7484                  * get stale data.
7485                  */
7486                 if (!ordered &&
7487                     (!writing ||
7488                      !btrfs_page_exists_in_range(inode, lockstart, lockend)))
7489                         break;
7490
7491                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7492                                      cached_state, GFP_NOFS);
7493
7494                 if (ordered) {
7495                         /*
7496                          * If we are doing a DIO read and the ordered extent we
7497                          * found is for a buffered write, we cannot wait for it
7498                          * to complete and retry, because if we did so we could
7499                          * deadlock with concurrent buffered writes on page
7500                          * locks.  This happens only if our DIO read covers more
7501                          * than one extent map and, by this point, it has already
7502                          * created an ordered extent for a previous extent map
7503                          * and locked its range in the inode's io tree, and a
7504                          * concurrent write against that previous extent map's
7505                          * range and this range has started (we unlock the ranges
7506                          * in the io tree only when the bios complete, and
7507                          * buffered writes always lock pages before attempting
7508                          * to lock a range in the io tree).
7509                          */
7510                         if (writing ||
7511                             test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
7512                                 btrfs_start_ordered_extent(inode, ordered, 1);
7513                         else
7514                                 ret = -ENOTBLK;
7515                         btrfs_put_ordered_extent(ordered);
7516                 } else {
7517                         /*
7518                          * We could trigger writeback for this range (and wait
7519                          * for it to complete) and then invalidate the pages for
7520                          * this range (through invalidate_inode_pages2_range()),
7521                          * but that can lead us to a deadlock with a concurrent
7522                          * call to readpages() (a buffered read or a defrag call
7523                          * triggered a readahead) on a page lock due to an
7524                          * ordered dio extent we created before but did not have
7525                          * yet a corresponding bio submitted (hence it cannot
7526                          * complete), which makes readpages() wait for that
7527                          * ordered extent to complete while holding a lock on
7528                          * that page.
7529                          */
7530                         ret = -ENOTBLK;
7531                 }
7532
7533                 if (ret)
7534                         break;
7535
7536                 cond_resched();
7537         }
7538
7539         return ret;
7540 }
7541
7542 static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
7543                                            u64 len, u64 orig_start,
7544                                            u64 block_start, u64 block_len,
7545                                            u64 orig_block_len, u64 ram_bytes,
7546                                            int type)
7547 {
7548         struct extent_map_tree *em_tree;
7549         struct extent_map *em;
7550         struct btrfs_root *root = BTRFS_I(inode)->root;
7551         int ret;
7552
7553         em_tree = &BTRFS_I(inode)->extent_tree;
7554         em = alloc_extent_map();
7555         if (!em)
7556                 return ERR_PTR(-ENOMEM);
7557
7558         em->start = start;
7559         em->orig_start = orig_start;
7560         em->mod_start = start;
7561         em->mod_len = len;
7562         em->len = len;
7563         em->block_len = block_len;
7564         em->block_start = block_start;
7565         em->bdev = root->fs_info->fs_devices->latest_bdev;
7566         em->orig_block_len = orig_block_len;
7567         em->ram_bytes = ram_bytes;
7568         em->generation = -1;
7569         set_bit(EXTENT_FLAG_PINNED, &em->flags);
7570         if (type == BTRFS_ORDERED_PREALLOC)
7571                 set_bit(EXTENT_FLAG_FILLING, &em->flags);
7572
7573         do {
7574                 btrfs_drop_extent_cache(inode, em->start,
7575                                 em->start + em->len - 1, 0);
7576                 write_lock(&em_tree->lock);
7577                 ret = add_extent_mapping(em_tree, em, 1);
7578                 write_unlock(&em_tree->lock);
7579         } while (ret == -EEXIST);
7580
7581         if (ret) {
7582                 free_extent_map(em);
7583                 return ERR_PTR(ret);
7584         }
7585
7586         return em;
7587 }
7588
7589 static void adjust_dio_outstanding_extents(struct inode *inode,
7590                                            struct btrfs_dio_data *dio_data,
7591                                            const u64 len)
7592 {
7593         unsigned num_extents;
7594
7595         num_extents = (unsigned) div64_u64(len + BTRFS_MAX_EXTENT_SIZE - 1,
7596                                            BTRFS_MAX_EXTENT_SIZE);
7597         /*
7598          * If we have an outstanding_extents count still set then we're
7599          * within our reservation, otherwise we need to adjust our inode
7600          * counter appropriately.
7601          */
7602         if (dio_data->outstanding_extents) {
7603                 dio_data->outstanding_extents -= num_extents;
7604         } else {
7605                 spin_lock(&BTRFS_I(inode)->lock);
7606                 BTRFS_I(inode)->outstanding_extents += num_extents;
7607                 spin_unlock(&BTRFS_I(inode)->lock);
7608         }
7609 }
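
/*
 * Example: len == 192M with BTRFS_MAX_EXTENT_SIZE == 128M yields
 * num_extents == 2.  If the DIO reservation still tracks outstanding
 * extents we consume two of them; otherwise the inode is charged two
 * additional outstanding extents.
 */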
7610
7611 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
7612                                    struct buffer_head *bh_result, int create)
7613 {
7614         struct extent_map *em;
7615         struct btrfs_root *root = BTRFS_I(inode)->root;
7616         struct extent_state *cached_state = NULL;
7617         struct btrfs_dio_data *dio_data = NULL;
7618         u64 start = iblock << inode->i_blkbits;
7619         u64 lockstart, lockend;
7620         u64 len = bh_result->b_size;
7621         int unlock_bits = EXTENT_LOCKED;
7622         int ret = 0;
7623
7624         if (create)
7625                 unlock_bits |= EXTENT_DIRTY;
7626         else
7627                 len = min_t(u64, len, root->sectorsize);
7628
7629         lockstart = start;
7630         lockend = start + len - 1;
7631
7632         if (current->journal_info) {
7633                 /*
7634                  * Need to pull our outstanding extents and set
7635                  * journal_info to NULL so that anything that needs to
7636                  * check if there's a transaction doesn't get confused.
7637                  */
7638                 dio_data = current->journal_info;
7639                 current->journal_info = NULL;
7640         }
7641
7642         /*
7643          * If this errors out it's because we couldn't invalidate pagecache for
7644          * this range and we need to fall back to buffered IO.
7645          */
7646         if (lock_extent_direct(inode, lockstart, lockend, &cached_state,
7647                                create)) {
7648                 ret = -ENOTBLK;
7649                 goto err;
7650         }
7651
7652         em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
7653         if (IS_ERR(em)) {
7654                 ret = PTR_ERR(em);
7655                 goto unlock_err;
7656         }
7657
7658         /*
7659          * Ok, for INLINE and COMPRESSED extents we need to fall back to
7660          * buffered IO.  INLINE is special, and we could probably kludge it in
7661          * here, but it's still buffered so for safety let's just fall back to
7662          * the generic buffered path.
7663          *
7664          * For COMPRESSED we _have_ to read the entire extent in so we can
7665          * decompress it, so there will be buffering required no matter what we
7666          * do, so go ahead and fallback to buffered.
7667          *
7668          * We return -ENOTBLK because that's what makes DIO go ahead and go back
7669          * to buffered IO.  Don't blame me, this is the price we pay for using
7670          * the generic code.
7671          */
7672         if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
7673             em->block_start == EXTENT_MAP_INLINE) {
7674                 free_extent_map(em);
7675                 ret = -ENOTBLK;
7676                 goto unlock_err;
7677         }
7678
7679         /* Just a good old-fashioned hole, return */
7680         if (!create && (em->block_start == EXTENT_MAP_HOLE ||
7681                         test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
7682                 free_extent_map(em);
7683                 goto unlock_err;
7684         }
7685
7686         /*
7687          * We don't allocate a new extent in the following cases
7688          *
7689          * 1) The inode is marked as NODATACOW.  In this case we'll just use the
7690          * existing extent.
7691          * 2) The extent is marked as PREALLOC.  We're good to go here and can
7692          * just use the extent.
7693          *
7694          */
7695         if (!create) {
7696                 len = min(len, em->len - (start - em->start));
7697                 lockstart = start + len;
7698                 goto unlock;
7699         }
7700
7701         if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
7702             ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
7703              em->block_start != EXTENT_MAP_HOLE)) {
7704                 int type;
7705                 u64 block_start, orig_start, orig_block_len, ram_bytes;
7706
7707                 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7708                         type = BTRFS_ORDERED_PREALLOC;
7709                 else
7710                         type = BTRFS_ORDERED_NOCOW;
7711                 len = min(len, em->len - (start - em->start));
7712                 block_start = em->block_start + (start - em->start);
7713
7714                 if (can_nocow_extent(inode, start, &len, &orig_start,
7715                                      &orig_block_len, &ram_bytes) == 1 &&
7716                     btrfs_inc_nocow_writers(root->fs_info, block_start)) {
7717                         struct extent_map *em2;
7718
7719                         em2 = btrfs_create_dio_extent(inode, start, len,
7720                                                       orig_start, block_start,
7721                                                       len, orig_block_len,
7722                                                       ram_bytes, type);
7723                         btrfs_dec_nocow_writers(root->fs_info, block_start);
7724                         if (type == BTRFS_ORDERED_PREALLOC) {
7725                                 free_extent_map(em);
7726                                 em = em2;
7727                         }
7728                         if (em2 && IS_ERR(em2)) {
7729                                 ret = PTR_ERR(em2);
7730                                 goto unlock_err;
7731                         }
7732                         goto unlock;
7733                 }
7734         }
7735
7736         /*
7737          * this will cow the extent, reset the len in case we changed
7738          * it above
7739          */
7740         len = bh_result->b_size;
7741         free_extent_map(em);
7742         em = btrfs_new_extent_direct(inode, start, len);
7743         if (IS_ERR(em)) {
7744                 ret = PTR_ERR(em);
7745                 goto unlock_err;
7746         }
7747         len = min(len, em->len - (start - em->start));
7748 unlock:
7749         bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
7750                 inode->i_blkbits;
7751         bh_result->b_size = len;
7752         bh_result->b_bdev = em->bdev;
7753         set_buffer_mapped(bh_result);
7754         if (create) {
7755                 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7756                         set_buffer_new(bh_result);
7757
7758                 /*
7759                  * Need to update the i_size under the extent lock so buffered
7760                  * readers will get the updated i_size when we unlock.
7761                  */
7762                 if (start + len > i_size_read(inode))
7763                         i_size_write(inode, start + len);
7764
7765                 adjust_dio_outstanding_extents(inode, dio_data, len);
7766                 btrfs_free_reserved_data_space(inode, start, len);
7767                 WARN_ON(dio_data->reserve < len);
7768                 dio_data->reserve -= len;
7769                 dio_data->unsubmitted_oe_range_end = start + len;
7770                 current->journal_info = dio_data;
7771         }
7772
7773         /*
7774          * In the case of write we need to clear and unlock the entire range,
7775          * in the case of read we need to unlock only the end area that we
7776          * aren't using if there is any left over space.
7777          */
7778         if (lockstart < lockend) {
7779                 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
7780                                  lockend, unlock_bits, 1, 0,
7781                                  &cached_state, GFP_NOFS);
7782         } else {
7783                 free_extent_state(cached_state);
7784         }
7785
7786         free_extent_map(em);
7787
7788         return 0;
7789
7790 unlock_err:
7791         clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7792                          unlock_bits, 1, 0, &cached_state, GFP_NOFS);
7793 err:
7794         if (dio_data)
7795                 current->journal_info = dio_data;
7796         /*
7797          * Compensate the delalloc release we do in btrfs_direct_IO() when we
7798          * write less data than expected, so that we don't underflow our inode's
7799          * outstanding extents counter.
7800          */
7801         if (create && dio_data)
7802                 adjust_dio_outstanding_extents(inode, dio_data, len);
7803
7804         return ret;
7805 }
7806
7807 static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio,
7808                                         int rw, int mirror_num)
7809 {
7810         struct btrfs_root *root = BTRFS_I(inode)->root;
7811         int ret;
7812
7813         BUG_ON(rw & REQ_WRITE);
7814
7815         bio_get(bio);
7816
7817         ret = btrfs_bio_wq_end_io(root->fs_info, bio,
7818                                   BTRFS_WQ_ENDIO_DIO_REPAIR);
7819         if (ret)
7820                 goto err;
7821
7822         ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
7823 err:
7824         bio_put(bio);
7825         return ret;
7826 }
7827
7828 static int btrfs_check_dio_repairable(struct inode *inode,
7829                                       struct bio *failed_bio,
7830                                       struct io_failure_record *failrec,
7831                                       int failed_mirror)
7832 {
7833         int num_copies;
7834
7835         num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
7836                                       failrec->logical, failrec->len);
7837         if (num_copies == 1) {
7838                 /*
7839                  * we only have a single copy of the data, so don't bother with
7840                  * all the retry and error correction code that follows. no
7841                  * matter what the error is, it is very likely to persist.
7842                  */
7843                 pr_debug("Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
7844                          num_copies, failrec->this_mirror, failed_mirror);
7845                 return 0;
7846         }
7847
7848         failrec->failed_mirror = failed_mirror;
7849         failrec->this_mirror++;
7850         if (failrec->this_mirror == failed_mirror)
7851                 failrec->this_mirror++;
7852
7853         if (failrec->this_mirror > num_copies) {
7854                 pr_debug("Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
7855                          num_copies, failrec->this_mirror, failed_mirror);
7856                 return 0;
7857         }
7858
7859         return 1;
7860 }
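
/*
 * Example: on a two-copy profile (e.g. RAID1) num_copies == 2.  A read
 * that failed on mirror 1 retries with this_mirror == 2; if that also
 * fails, this_mirror would become 3 > num_copies and repair gives up.
 */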
7861
7862 static int dio_read_error(struct inode *inode, struct bio *failed_bio,
7863                         struct page *page, unsigned int pgoff,
7864                         u64 start, u64 end, int failed_mirror,
7865                         bio_end_io_t *repair_endio, void *repair_arg)
7866 {
7867         struct io_failure_record *failrec;
7868         struct bio *bio;
7869         int isector;
7870         int read_mode;
7871         int ret;
7872
7873         BUG_ON(failed_bio->bi_rw & REQ_WRITE);
7874
7875         ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
7876         if (ret)
7877                 return ret;
7878
7879         ret = btrfs_check_dio_repairable(inode, failed_bio, failrec,
7880                                          failed_mirror);
7881         if (!ret) {
7882                 free_io_failure(inode, failrec);
7883                 return -EIO;
7884         }
7885
7886         if (failed_bio->bi_vcnt > 1 ||
7887             failed_bio->bi_io_vec->bv_len > BTRFS_I(inode)->root->sectorsize)
7889                 read_mode = READ_SYNC | REQ_FAILFAST_DEV;
7890         else
7891                 read_mode = READ_SYNC;
7892
7893         isector = start - btrfs_io_bio(failed_bio)->logical;
7894         isector >>= inode->i_sb->s_blocksize_bits;
7895         bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
7896                                 pgoff, isector, repair_endio, repair_arg);
7897         if (!bio) {
7898                 free_io_failure(inode, failrec);
7899                 return -EIO;
7900         }
7901
7902         btrfs_debug(BTRFS_I(inode)->root->fs_info,
7903                     "Repair DIO Read Error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d",
7904                     read_mode, failrec->this_mirror, failrec->in_validation);
7905
7906         ret = submit_dio_repair_bio(inode, bio, read_mode,
7907                                     failrec->this_mirror);
7908         if (ret) {
7909                 free_io_failure(inode, failrec);
7910                 bio_put(bio);
7911         }
7912
7913         return ret;
7914 }
7915
7916 struct btrfs_retry_complete {
7917         struct completion done;
7918         struct inode *inode;
7919         u64 start;
7920         int uptodate;
7921 };
7922
7923 static void btrfs_retry_endio_nocsum(struct bio *bio)
7924 {
7925         struct btrfs_retry_complete *done = bio->bi_private;
7926         struct inode *inode;
7927         struct bio_vec *bvec;
7928         int i;
7929
7930         if (bio->bi_error)
7931                 goto end;
7932
7933         ASSERT(bio->bi_vcnt == 1);
7934         inode = bio->bi_io_vec->bv_page->mapping->host;
7935         ASSERT(bio->bi_io_vec->bv_len == BTRFS_I(inode)->root->sectorsize);
7936
7937         done->uptodate = 1;
7938         bio_for_each_segment_all(bvec, bio, i)
7939                 clean_io_failure(done->inode, done->start, bvec->bv_page, 0);
7940 end:
7941         complete(&done->done);
7942         bio_put(bio);
7943 }
7944
7945 static int __btrfs_correct_data_nocsum(struct inode *inode,
7946                                        struct btrfs_io_bio *io_bio)
7947 {
7948         struct btrfs_fs_info *fs_info;
7949         struct bio_vec *bvec;
7950         struct btrfs_retry_complete done;
7951         u64 start;
7952         unsigned int pgoff;
7953         u32 sectorsize;
7954         int nr_sectors;
7955         int i;
7956         int ret;
7957
7958         fs_info = BTRFS_I(inode)->root->fs_info;
7959         sectorsize = BTRFS_I(inode)->root->sectorsize;
7960
7961         start = io_bio->logical;
7962         done.inode = inode;
7963
7964         bio_for_each_segment_all(bvec, &io_bio->bio, i) {
7965                 nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec->bv_len);
7966                 pgoff = bvec->bv_offset;
7967
7968 next_block_or_try_again:
7969                 done.uptodate = 0;
7970                 done.start = start;
7971                 init_completion(&done.done);
7972
7973                 ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page,
7974                                 pgoff, start, start + sectorsize - 1,
7975                                 io_bio->mirror_num,
7976                                 btrfs_retry_endio_nocsum, &done);
7977                 if (ret)
7978                         return ret;
7979
7980                 wait_for_completion(&done.done);
7981
7982                 if (!done.uptodate) {
7983                         /* We might have another mirror, so try again */
7984                         goto next_block_or_try_again;
7985                 }
7986
7987                 start += sectorsize;
7988
7989                 if (--nr_sectors) {
7990                         pgoff += sectorsize;
7991                         goto next_block_or_try_again;
7992                 }
7993         }
7994
7995         return 0;
7996 }
7997
7998 static void btrfs_retry_endio(struct bio *bio)
7999 {
8000         struct btrfs_retry_complete *done = bio->bi_private;
8001         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
8002         struct inode *inode;
8003         struct bio_vec *bvec;
8004         u64 start;
8005         int uptodate;
8006         int ret;
8007         int i;
8008
8009         if (bio->bi_error)
8010                 goto end;
8011
8012         uptodate = 1;
8013
8014         start = done->start;
8015
8016         ASSERT(bio->bi_vcnt == 1);
8017         inode = bio->bi_io_vec->bv_page->mapping->host;
8018         ASSERT(bio->bi_io_vec->bv_len == BTRFS_I(inode)->root->sectorsize);
8019
8020         bio_for_each_segment_all(bvec, bio, i) {
8021                 ret = __readpage_endio_check(done->inode, io_bio, i,
8022                                         bvec->bv_page, bvec->bv_offset,
8023                                         done->start, bvec->bv_len);
8024                 if (!ret)
8025                         clean_io_failure(done->inode, done->start,
8026                                         bvec->bv_page, bvec->bv_offset);
8027                 else
8028                         uptodate = 0;
8029         }
8030
8031         done->uptodate = uptodate;
8032 end:
8033         complete(&done->done);
8034         bio_put(bio);
8035 }
8036
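/*
 * Verify each sector of a completed direct IO read against the csums
 * loaded at submit time.  Sectors that fail verification are re-read
 * from other mirrors one at a time; only a sector that verifies on no
 * mirror leaves err set.
 */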
8037 static int __btrfs_subio_endio_read(struct inode *inode,
8038                                     struct btrfs_io_bio *io_bio, int err)
8039 {
8040         struct btrfs_fs_info *fs_info;
8041         struct bio_vec *bvec;
8042         struct btrfs_retry_complete done;
8043         u64 start;
8044         u64 offset = 0;
8045         u32 sectorsize;
8046         int nr_sectors;
8047         unsigned int pgoff;
8048         int csum_pos;
8049         int i;
8050         int ret;
8051
8052         fs_info = BTRFS_I(inode)->root->fs_info;
8053         sectorsize = BTRFS_I(inode)->root->sectorsize;
8054
8055         err = 0;
8056         start = io_bio->logical;
8057         done.inode = inode;
8058
8059         bio_for_each_segment_all(bvec, &io_bio->bio, i) {
8060                 nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec->bv_len);
8061
8062                 pgoff = bvec->bv_offset;
8063 next_block:
8064                 csum_pos = BTRFS_BYTES_TO_BLKS(fs_info, offset);
8065                 ret = __readpage_endio_check(inode, io_bio, csum_pos,
8066                                         bvec->bv_page, pgoff, start,
8067                                         sectorsize);
8068                 if (likely(!ret))
8069                         goto next;
8070 try_again:
8071                 done.uptodate = 0;
8072                 done.start = start;
8073                 init_completion(&done.done);
8074
8075                 ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page,
8076                                 pgoff, start, start + sectorsize - 1,
8077                                 io_bio->mirror_num,
8078                                 btrfs_retry_endio, &done);
8079                 if (ret) {
8080                         err = ret;
8081                         goto next;
8082                 }
8083
8084                 wait_for_completion(&done.done);
8085
8086                 if (!done.uptodate) {
8087                         /* We might have another mirror, so try again */
8088                         goto try_again;
8089                 }
8090 next:
8091                 offset += sectorsize;
8092                 start += sectorsize;
8093
8094                 ASSERT(nr_sectors);
8095
8096                 if (--nr_sectors) {
8097                         pgoff += sectorsize;
8098                         goto next_block;
8099                 }
8100         }
8101
8102         return err;
8103 }
8104
8105 static int btrfs_subio_endio_read(struct inode *inode,
8106                                   struct btrfs_io_bio *io_bio, int err)
8107 {
8108         bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
8109
8110         if (skip_csum) {
8111                 if (unlikely(err))
8112                         return __btrfs_correct_data_nocsum(inode, io_bio);
8113                 else
8114                         return 0;
8115         } else {
8116                 return __btrfs_subio_endio_read(inode, io_bio, err);
8117         }
8118 }
8119
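/*
 * Final completion of a direct IO read: give the csum/repair machinery
 * a chance to fix up bad sectors, unlock the file range and complete
 * the original dio bio.
 */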
8120 static void btrfs_endio_direct_read(struct bio *bio)
8121 {
8122         struct btrfs_dio_private *dip = bio->bi_private;
8123         struct inode *inode = dip->inode;
8124         struct bio *dio_bio;
8125         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
8126         int err = bio->bi_error;
8127
8128         if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED)
8129                 err = btrfs_subio_endio_read(inode, io_bio, err);
8130
8131         unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
8132                       dip->logical_offset + dip->bytes - 1);
8133         dio_bio = dip->dio_bio;
8134
8135         kfree(dip);
8136
8137         dio_bio->bi_error = err;
8138         dio_end_io(dio_bio, err);
8139
8140         if (io_bio->end_io)
8141                 io_bio->end_io(io_bio, err);
8142         bio_put(bio);
8143 }
8144
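/*
 * Mark the ordered extents covering [offset, offset + bytes) complete.
 * A single dio may span several ordered extents, so keep looping until
 * the accounting covers the whole range, queueing each completed
 * ordered extent for btrfs_finish_ordered_io().
 */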
8145 static void btrfs_endio_direct_write_update_ordered(struct inode *inode,
8146                                                     const u64 offset,
8147                                                     const u64 bytes,
8148                                                     const int uptodate)
8149 {
8150         struct btrfs_root *root = BTRFS_I(inode)->root;
8151         struct btrfs_ordered_extent *ordered = NULL;
8152         u64 ordered_offset = offset;
8153         u64 ordered_bytes = bytes;
8154         int ret;
8155
8156 again:
8157         ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
8158                                                    &ordered_offset,
8159                                                    ordered_bytes,
8160                                                    uptodate);
8161         if (!ret)
8162                 goto out_test;
8163
8164         btrfs_init_work(&ordered->work, btrfs_endio_write_helper,
8165                         finish_ordered_fn, NULL, NULL);
8166         btrfs_queue_work(root->fs_info->endio_write_workers,
8167                          &ordered->work);
8168 out_test:
8169         /*
8170          * our bio might span multiple ordered extents.  If we haven't
8171          * completed the accounting for the whole dio, go back and try again
8172          */
8173         if (ordered_offset < offset + bytes) {
8174                 ordered_bytes = offset + bytes - ordered_offset;
8175                 ordered = NULL;
8176                 goto again;
8177         }
8178 }
8179
8180 static void btrfs_endio_direct_write(struct bio *bio)
8181 {
8182         struct btrfs_dio_private *dip = bio->bi_private;
8183         struct bio *dio_bio = dip->dio_bio;
8184
8185         btrfs_endio_direct_write_update_ordered(dip->inode,
8186                                                 dip->logical_offset,
8187                                                 dip->bytes,
8188                                                 !bio->bi_error);
8189
8190         kfree(dip);
8191
8192         dio_bio->bi_error = bio->bi_error;
8193         dio_end_io(dio_bio, bio->bi_error);
8194         bio_put(bio);
8195 }
8196
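/*
 * Async submit "start" hook for direct IO writes: checksum the bio
 * just before __btrfs_submit_bio_done() maps it to the devices.
 */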
8197 static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
8198                                     struct bio *bio, int mirror_num,
8199                                     unsigned long bio_flags, u64 offset)
8200 {
8201         int ret;
8202         struct btrfs_root *root = BTRFS_I(inode)->root;
8203         ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
8204         BUG_ON(ret); /* -ENOMEM */
8205         return 0;
8206 }
8207
8208 static void btrfs_end_dio_bio(struct bio *bio)
8209 {
8210         struct btrfs_dio_private *dip = bio->bi_private;
8211         int err = bio->bi_error;
8212
8213         if (err)
8214                 btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
8215                            "direct IO failed ino %llu rw %lu sector %#Lx len %u errno %d",
8216                            btrfs_ino(dip->inode), bio->bi_rw,
8217                            (unsigned long long)bio->bi_iter.bi_sector,
8218                            bio->bi_iter.bi_size, err);
8219
8220         if (dip->subio_endio)
8221                 err = dip->subio_endio(dip->inode, btrfs_io_bio(bio), err);
8222
8223         if (err) {
8224                 dip->errors = 1;
8225
8226                 /*
8227                  * Before the atomic variable goes to zero, we must make
8228                  * sure dip->errors is perceived to be set.
8229                  */
8230                 smp_mb__before_atomic();
8231         }
8232
8233         /* if there are more bios still pending for this dio, just exit */
8234         if (!atomic_dec_and_test(&dip->pending_bios))
8235                 goto out;
8236
8237         if (dip->errors) {
8238                 bio_io_error(dip->orig_bio);
8239         } else {
8240                 dip->dio_bio->bi_error = 0;
8241                 bio_endio(dip->orig_bio);
8242         }
8243 out:
8244         bio_put(bio);
8245 }
8246
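/*
 * Allocate a bio for a piece of the dio and associate it with the
 * submitting task, so the IO is accounted to the caller's io context
 * and blkcg rather than to a worker thread.
 */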
8247 static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
8248                                        u64 first_sector, gfp_t gfp_flags)
8249 {
8250         struct bio *bio;
8251         bio = btrfs_bio_alloc(bdev, first_sector, BIO_MAX_PAGES, gfp_flags);
8252         if (bio)
8253                 bio_associate_current(bio);
8254         return bio;
8255 }
8256
8257 static inline int btrfs_lookup_and_bind_dio_csum(struct btrfs_root *root,
8258                                                  struct inode *inode,
8259                                                  struct btrfs_dio_private *dip,
8260                                                  struct bio *bio,
8261                                                  u64 file_offset)
8262 {
8263         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
8264         struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio);
8265         int ret;
8266
8267         /*
8268          * We load all the csum data we need when we submit
8269          * the first bio to reduce the csum tree search and
8270          * contention.
8271          */
8272         if (dip->logical_offset == file_offset) {
8273                 ret = btrfs_lookup_bio_sums_dio(root, inode, dip->orig_bio,
8274                                                 file_offset);
8275                 if (ret)
8276                         return ret;
8277         }
8278
8279         if (bio == dip->orig_bio)
8280                 return 0;
8281
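	/*
	 * Point this bio's csum array at the right slot of the csums we
	 * preloaded into the original bio: the byte offset into the dio,
	 * converted to a sector count, indexes one u32 csum per sector.
	 */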
8282         file_offset -= dip->logical_offset;
8283         file_offset >>= inode->i_sb->s_blocksize_bits;
8284         io_bio->csum = (u8 *)(((u32 *)orig_io_bio->csum) + file_offset);
8285
8286         return 0;
8287 }
8288
8289 static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
8290                                          int rw, u64 file_offset, int skip_sum,
8291                                          int async_submit)
8292 {
8293         struct btrfs_dio_private *dip = bio->bi_private;
8294         int write = rw & REQ_WRITE;
8295         struct btrfs_root *root = BTRFS_I(inode)->root;
8296         int ret;
8297
8298         if (async_submit)
8299                 async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
8300
8301         bio_get(bio);
8302
8303         if (!write) {
8304                 ret = btrfs_bio_wq_end_io(root->fs_info, bio,
8305                                 BTRFS_WQ_ENDIO_DATA);
8306                 if (ret)
8307                         goto err;
8308         }
8309
8310         if (skip_sum)
8311                 goto map;
8312
8313         if (write && async_submit) {
8314                 ret = btrfs_wq_submit_bio(root->fs_info,
8315                                    inode, rw, bio, 0, 0,
8316                                    file_offset,
8317                                    __btrfs_submit_bio_start_direct_io,
8318                                    __btrfs_submit_bio_done);
8319                 goto err;
8320         } else if (write) {
8321                 /*
8322                  * If we aren't doing async submit, calculate the csum of the
8323                  * bio now.
8324                  */
8325                 ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
8326                 if (ret)
8327                         goto err;
8328         } else {
8329                 ret = btrfs_lookup_and_bind_dio_csum(root, inode, dip, bio,
8330                                                      file_offset);
8331                 if (ret)
8332                         goto err;
8333         }
8334 map:
8335         ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
8336 err:
8337         bio_put(bio);
8338         return ret;
8339 }
8340
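/*
 * Split the original dio bio into pieces the block layer can map in
 * one go: btrfs_map_block() reports how many bytes are mappable at the
 * current sector, and whenever the next block would exceed that (or no
 * longer fits in the bio), the current piece is submitted and a new
 * bio is started at the new sector.
 */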
8341 static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
8342                                     int skip_sum)
8343 {
8344         struct inode *inode = dip->inode;
8345         struct btrfs_root *root = BTRFS_I(inode)->root;
8346         struct bio *bio;
8347         struct bio *orig_bio = dip->orig_bio;
8348         struct bio_vec *bvec = orig_bio->bi_io_vec;
8349         u64 start_sector = orig_bio->bi_iter.bi_sector;
8350         u64 file_offset = dip->logical_offset;
8351         u64 submit_len = 0;
8352         u64 map_length;
8353         u32 blocksize = root->sectorsize;
8354         int async_submit = 0;
8355         int nr_sectors;
8356         int ret;
8357         int i;
8358
8359         map_length = orig_bio->bi_iter.bi_size;
8360         ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
8361                               &map_length, NULL, 0);
8362         if (ret)
8363                 return -EIO;
8364
8365         if (map_length >= orig_bio->bi_iter.bi_size) {
8366                 bio = orig_bio;
8367                 dip->flags |= BTRFS_DIO_ORIG_BIO_SUBMITTED;
8368                 goto submit;
8369         }
8370
8371         /* async crcs make it difficult to collect full stripe writes. */
8372         if (btrfs_get_alloc_profile(root, 1) & BTRFS_BLOCK_GROUP_RAID56_MASK)
8373                 async_submit = 0;
8374         else
8375                 async_submit = 1;
8376
8377         bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
8378         if (!bio)
8379                 return -ENOMEM;
8380
8381         bio->bi_private = dip;
8382         bio->bi_end_io = btrfs_end_dio_bio;
8383         btrfs_io_bio(bio)->logical = file_offset;
8384         atomic_inc(&dip->pending_bios);
8385
8386         while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
8387                 nr_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info, bvec->bv_len);
8388                 i = 0;
8389 next_block:
8390                 if (unlikely(map_length < submit_len + blocksize ||
8391                     bio_add_page(bio, bvec->bv_page, blocksize,
8392                             bvec->bv_offset + (i * blocksize)) < blocksize)) {
8393                         /*
8394                          * Increment the count before we submit the bio so
8395                          * the end IO handler can't drop it to zero while
8396                          * we are still setting up the next bio. Otherwise
8397                          * the dip might get freed before we're done with it.
8398                          */
8399                         atomic_inc(&dip->pending_bios);
8400                         ret = __btrfs_submit_dio_bio(bio, inode, rw,
8401                                                      file_offset, skip_sum,
8402                                                      async_submit);
8403                         if (ret) {
8404                                 bio_put(bio);
8405                                 atomic_dec(&dip->pending_bios);
8406                                 goto out_err;
8407                         }
8408
8409                         start_sector += submit_len >> 9;
8410                         file_offset += submit_len;
8411
8412                         submit_len = 0;
8413
8414                         bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
8415                                                   start_sector, GFP_NOFS);
8416                         if (!bio)
8417                                 goto out_err;
8418                         bio->bi_private = dip;
8419                         bio->bi_end_io = btrfs_end_dio_bio;
8420                         btrfs_io_bio(bio)->logical = file_offset;
8421
8422                         map_length = orig_bio->bi_iter.bi_size;
8423                         ret = btrfs_map_block(root->fs_info, rw,
8424                                               start_sector << 9,
8425                                               &map_length, NULL, 0);
8426                         if (ret) {
8427                                 bio_put(bio);
8428                                 goto out_err;
8429                         }
8430
8431                         goto next_block;
8432                 } else {
8433                         submit_len += blocksize;
8434                         if (--nr_sectors) {
8435                                 i++;
8436                                 goto next_block;
8437                         }
8438                         bvec++;
8439                 }
8440         }
8441
8442 submit:
8443         ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
8444                                      async_submit);
8445         if (!ret)
8446                 return 0;
8447
8448         bio_put(bio);
8449 out_err:
8450         dip->errors = 1;
8451         /*
8452          * Before the atomic variable goes to zero, we must
8453          * make sure dip->errors is perceived to be set.
8454          */
8455         smp_mb__before_atomic();
8456         if (atomic_dec_and_test(&dip->pending_bios))
8457                 bio_io_error(dip->orig_bio);
8458
8459         /* the endio handler reports the error, so we needn't return it */
8460         return 0;
8461 }
8462
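/*
 * Entry point handed to __blockdev_direct_IO(): clone the dio_bio so
 * btrfs owns the completion path, wrap it in a btrfs_dio_private and
 * pass it to btrfs_submit_direct_hook().  If anything fails before
 * submission, do the cleanup the endio callbacks would otherwise do.
 */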
8463 static void btrfs_submit_direct(int rw, struct bio *dio_bio,
8464                                 struct inode *inode, loff_t file_offset)
8465 {
8466         struct btrfs_dio_private *dip = NULL;
8467         struct bio *io_bio = NULL;
8468         struct btrfs_io_bio *btrfs_bio;
8469         int skip_sum;
8470         int write = rw & REQ_WRITE;
8471         int ret = 0;
8472
8473         skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
8474
8475         io_bio = btrfs_bio_clone(dio_bio, GFP_NOFS);
8476         if (!io_bio) {
8477                 ret = -ENOMEM;
8478                 goto free_ordered;
8479         }
8480
8481         dip = kzalloc(sizeof(*dip), GFP_NOFS);
8482         if (!dip) {
8483                 ret = -ENOMEM;
8484                 goto free_ordered;
8485         }
8486
8487         dip->private = dio_bio->bi_private;
8488         dip->inode = inode;
8489         dip->logical_offset = file_offset;
8490         dip->bytes = dio_bio->bi_iter.bi_size;
8491         dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9;
8492         io_bio->bi_private = dip;
8493         dip->orig_bio = io_bio;
8494         dip->dio_bio = dio_bio;
8495         atomic_set(&dip->pending_bios, 0);
8496         btrfs_bio = btrfs_io_bio(io_bio);
8497         btrfs_bio->logical = file_offset;
8498
8499         if (write) {
8500                 io_bio->bi_end_io = btrfs_endio_direct_write;
8501         } else {
8502                 io_bio->bi_end_io = btrfs_endio_direct_read;
8503                 dip->subio_endio = btrfs_subio_endio_read;
8504         }
8505
8506         /*
8507          * Reset the range for unsubmitted ordered extents (to a 0 length range)
8508          * even if we fail to submit a bio, because in such case we do the
8509          * corresponding error handling below and it must not be done a second
8510          * time by btrfs_direct_IO().
8511          */
8512         if (write) {
8513                 struct btrfs_dio_data *dio_data = current->journal_info;
8514
8515                 dio_data->unsubmitted_oe_range_end = dip->logical_offset +
8516                         dip->bytes;
8517                 dio_data->unsubmitted_oe_range_start =
8518                         dio_data->unsubmitted_oe_range_end;
8519         }
8520
8521         ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
8522         if (!ret)
8523                 return;
8524
8525         if (btrfs_bio->end_io)
8526                 btrfs_bio->end_io(btrfs_bio, ret);
8527
8528 free_ordered:
8529         /*
8530          * If we arrived here it means we failed to submit the dip,
8531          * failed to clone the dio_bio, or failed to allocate the dip.
8532          * If we cloned the dio_bio and allocated the dip, we can just
8533          * call bio_endio against our io_bio for proper resource cleanup.
8534          * Otherwise we must do the same as btrfs_endio_direct_[write|read]
8535          * because we can't call these callbacks - they require an
8536          * allocated dip and a clone of dio_bio.
8537          */
8538         if (io_bio && dip) {
8539                 io_bio->bi_error = -EIO;
8540                 bio_endio(io_bio);
8541                 /*
8542                  * The end io callbacks free our dip, do the final put on io_bio
8543                  * and all the cleanup and final put for dio_bio (through
8544                  * dio_end_io()).
8545                  */
8546                 dip = NULL;
8547                 io_bio = NULL;
8548         } else {
8549                 if (write)
8550                         btrfs_endio_direct_write_update_ordered(inode,
8551                                                 file_offset,
8552                                                 dio_bio->bi_iter.bi_size,
8553                                                 0);
8554                 else
8555                         unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
8556                               file_offset + dio_bio->bi_iter.bi_size - 1);
8557
8558                 dio_bio->bi_error = -EIO;
8559                 /*
8560                  * Releases and cleans up our dio_bio, no need to bio_put()
8561                  * nor bio_endio()/bio_io_error() against dio_bio.
8562                  */
8563                 dio_end_io(dio_bio, ret);
8564         }
8565         if (io_bio)
8566                 bio_put(io_bio);
8567         kfree(dip);
8568 }
8569
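/*
 * Direct IO requires the file offset and every iovec base and length
 * to be aligned to the filesystem sector size.  As a rough userspace
 * sketch (illustrative only, not part of this file):
 *
 *	buf = aligned_alloc(sectorsize, len);	   (len a sector multiple)
 *	fd  = open(path, O_RDONLY | O_DIRECT);
 *	pread(fd, buf, len, offset);		   (offset sector aligned)
 *
 * Reads additionally reject iovecs that repeat an iov_base, since
 * reading twice into the same buffer leads to csum errors on verify.
 */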
8570 static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb,
8571                         const struct iov_iter *iter, loff_t offset)
8572 {
8573         int seg;
8574         int i;
8575         unsigned blocksize_mask = root->sectorsize - 1;
8576         ssize_t retval = -EINVAL;
8577
8578         if (offset & blocksize_mask)
8579                 goto out;
8580
8581         if (iov_iter_alignment(iter) & blocksize_mask)
8582                 goto out;
8583
8584         /* If this is a write we don't need to check anymore */
8585         if (iov_iter_rw(iter) == WRITE)
8586                 return 0;
8587         /*
8588          * Check to make sure we don't have duplicate iov_base values in
8589          * this iovec; if we do, return -EINVAL, otherwise we'll get csum
8590          * errors when reading back.
8591          */
8592         for (seg = 0; seg < iter->nr_segs; seg++) {
8593                 for (i = seg + 1; i < iter->nr_segs; i++) {
8594                         if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
8595                                 goto out;
8596                 }
8597         }
8598         retval = 0;
8599 out:
8600         return retval;
8601 }
8602
8603 static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
8604 {
8605         struct file *file = iocb->ki_filp;
8606         struct inode *inode = file->f_mapping->host;
8607         struct btrfs_root *root = BTRFS_I(inode)->root;
8608         struct btrfs_dio_data dio_data = { 0 };
8609         loff_t offset = iocb->ki_pos;
8610         size_t count = 0;
8611         int flags = 0;
8612         bool wakeup = true;
8613         bool relock = false;
8614         ssize_t ret;
8615
8616         if (check_direct_IO(BTRFS_I(inode)->root, iocb, iter, offset))
8617                 return 0;
8618
8619         inode_dio_begin(inode);
8620         smp_mb__after_atomic();
8621
8622         /*
8623          * The generic stuff only does filemap_write_and_wait_range, which
8624          * isn't enough if we've written compressed pages to this area, so
8625          * we need to flush the dirty pages again to make absolutely sure
8626          * that any outstanding dirty pages are on disk.
8627          */
8628         count = iov_iter_count(iter);
8629         if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
8630                      &BTRFS_I(inode)->runtime_flags))
8631                 filemap_fdatawrite_range(inode->i_mapping, offset,
8632                                          offset + count - 1);
8633
8634         if (iov_iter_rw(iter) == WRITE) {
8635                 /*
8636                  * If the write DIO goes beyond the EOF, we need to update
8637                  * the isize, which is protected by the i_mutex, so we
8638                  * cannot unlock the i_mutex in this case.
8639                  */
8640                 if (offset + count <= inode->i_size) {
8641                         inode_unlock(inode);
8642                         relock = true;
8643                 }
8644                 ret = btrfs_delalloc_reserve_space(inode, offset, count);
8645                 if (ret)
8646                         goto out;
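		/*
		 * Worst-case number of extents this write can create is
		 * ceil(count / BTRFS_MAX_EXTENT_SIZE); e.g. with the 128M
		 * max extent size, a 129M write reserves two extents.
		 */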
8647                 dio_data.outstanding_extents = div64_u64(count +
8648                                                 BTRFS_MAX_EXTENT_SIZE - 1,
8649                                                 BTRFS_MAX_EXTENT_SIZE);
8650
8651                 /*
8652                  * We need to know how many extents we reserved so that we can
8653                  * do the accounting properly if we go over the number we
8654                  * originally calculated.  Abuse current->journal_info for this.
8655                  */
8656                 dio_data.reserve = round_up(count, root->sectorsize);
8657                 dio_data.unsubmitted_oe_range_start = (u64)offset;
8658                 dio_data.unsubmitted_oe_range_end = (u64)offset;
8659                 current->journal_info = &dio_data;
8660         } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
8661                                      &BTRFS_I(inode)->runtime_flags)) {
8662                 inode_dio_end(inode);
8663                 flags = DIO_LOCKING | DIO_SKIP_HOLES;
8664                 wakeup = false;
8665         }
8666
8667         ret = __blockdev_direct_IO(iocb, inode,
8668                                    BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
8669                                    iter, btrfs_get_blocks_direct, NULL,
8670                                    btrfs_submit_direct, flags);
8671         if (iov_iter_rw(iter) == WRITE) {
8672                 current->journal_info = NULL;
8673                 if (ret < 0 && ret != -EIOCBQUEUED) {
8674                         if (dio_data.reserve)
8675                                 btrfs_delalloc_release_space(inode, offset,
8676                                                              dio_data.reserve);
8677                         /*
8678                          * On error we might have left some ordered extents
8679                          * without submitting corresponding bios for them, so
8680                          * clean them up to avoid other tasks getting them
8681                          * and waiting for them to complete forever.
8682                          */
8683                         if (dio_data.unsubmitted_oe_range_start <
8684                             dio_data.unsubmitted_oe_range_end)
8685                                 btrfs_endio_direct_write_update_ordered(inode,
8686                                         dio_data.unsubmitted_oe_range_start,
8687                                         dio_data.unsubmitted_oe_range_end -
8688                                         dio_data.unsubmitted_oe_range_start,
8689                                         0);
8690                 } else if (ret >= 0 && (size_t)ret < count)
8691                         btrfs_delalloc_release_space(inode, offset,
8692                                                      count - (size_t)ret);
8693         }
8694 out:
8695         if (wakeup)
8696                 inode_dio_end(inode);
8697         if (relock)
8698                 inode_lock(inode);
8699
8700         return ret;
8701 }
8702
8703 #define BTRFS_FIEMAP_FLAGS      (FIEMAP_FLAG_SYNC)
8704
8705 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
8706                 __u64 start, __u64 len)
8707 {
8708         int     ret;
8709
8710         ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS);
8711         if (ret)
8712                 return ret;
8713
8714         return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
8715 }
8716
8717 int btrfs_readpage(struct file *file, struct page *page)
8718 {
8719         struct extent_io_tree *tree;
8720         tree = &BTRFS_I(page->mapping->host)->io_tree;
8721         return extent_read_full_page(tree, page, btrfs_get_extent, 0);
8722 }
8723
8724 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
8725 {
8726         struct extent_io_tree *tree;
8727         struct inode *inode = page->mapping->host;
8728         int ret;
8729
8730         if (current->flags & PF_MEMALLOC) {
8731                 redirty_page_for_writepage(wbc, page);
8732                 unlock_page(page);
8733                 return 0;
8734         }
8735
8736         /*
8737          * If we are under memory pressure we will call this directly from the
8738          * VM, so we need to make sure we have the inode referenced for the
8739          * ordered extent. If not, just return as if we didn't do anything.
8740          */
8741         if (!igrab(inode)) {
8742                 redirty_page_for_writepage(wbc, page);
8743                 return AOP_WRITEPAGE_ACTIVATE;
8744         }
8745         tree = &BTRFS_I(page->mapping->host)->io_tree;
8746         ret = extent_write_full_page(tree, page, btrfs_get_extent, wbc);
8747         btrfs_add_delayed_iput(inode);
8748         return ret;
8749 }
8750
8751 static int btrfs_writepages(struct address_space *mapping,
8752                             struct writeback_control *wbc)
8753 {
8754         struct extent_io_tree *tree;
8755
8756         tree = &BTRFS_I(mapping->host)->io_tree;
8757         return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
8758 }
8759
8760 static int
8761 btrfs_readpages(struct file *file, struct address_space *mapping,
8762                 struct list_head *pages, unsigned nr_pages)
8763 {
8764         struct extent_io_tree *tree;
8765         tree = &BTRFS_I(mapping->host)->io_tree;
8766         return extent_readpages(tree, mapping, pages, nr_pages,
8767                                 btrfs_get_extent);
8768 }
8769 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8770 {
8771         struct extent_io_tree *tree;
8772         struct extent_map_tree *map;
8773         int ret;
8774
8775         tree = &BTRFS_I(page->mapping->host)->io_tree;
8776         map = &BTRFS_I(page->mapping->host)->extent_tree;
8777         ret = try_release_extent_mapping(map, tree, page, gfp_flags);
8778         if (ret == 1) {
8779                 ClearPagePrivate(page);
8780                 set_page_private(page, 0);
8781                 put_page(page);
8782         }
8783         return ret;
8784 }
8785
8786 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8787 {
8788         if (PageWriteback(page) || PageDirty(page))
8789                 return 0;
8790         return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
8791 }
8792
8793 static void btrfs_invalidatepage(struct page *page, unsigned int offset,
8794                                  unsigned int length)
8795 {
8796         struct inode *inode = page->mapping->host;
8797         struct extent_io_tree *tree;
8798         struct btrfs_ordered_extent *ordered;
8799         struct extent_state *cached_state = NULL;
8800         u64 page_start = page_offset(page);
8801         u64 page_end = page_start + PAGE_SIZE - 1;
8802         u64 start;
8803         u64 end;
8804         int inode_evicting = inode->i_state & I_FREEING;
8805
8806         /*
8807          * we have the page locked, so new writeback can't start,
8808          * and the dirty bit won't be cleared while we are here.
8809          *
8810          * Wait for IO on this page so that we can safely clear
8811          * the PagePrivate2 bit and do ordered accounting
8812          */
8813         wait_on_page_writeback(page);
8814
8815         tree = &BTRFS_I(inode)->io_tree;
8816         if (offset) {
8817                 btrfs_releasepage(page, GFP_NOFS);
8818                 return;
8819         }
8820
8821         if (!inode_evicting)
8822                 lock_extent_bits(tree, page_start, page_end, &cached_state);
8823 again:
8824         start = page_start;
8825         ordered = btrfs_lookup_ordered_range(inode, start,
8826                                         page_end - start + 1);
8827         if (ordered) {
8828                 end = min(page_end, ordered->file_offset + ordered->len - 1);
8829                 /*
8830                  * IO on this page will never be started, so we need
8831                  * to account for any ordered extents now
8832                  */
8833                 if (!inode_evicting)
8834                         clear_extent_bit(tree, start, end,
8835                                          EXTENT_DIRTY | EXTENT_DELALLOC |
8836                                          EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
8837                                          EXTENT_DEFRAG, 1, 0, &cached_state,
8838                                          GFP_NOFS);
8839                 /*
8840                  * whoever cleared the private bit is responsible
8841                  * for the finish_ordered_io
8842                  */
8843                 if (TestClearPagePrivate2(page)) {
8844                         struct btrfs_ordered_inode_tree *tree;
8845                         u64 new_len;
8846
8847                         tree = &BTRFS_I(inode)->ordered_tree;
8848
8849                         spin_lock_irq(&tree->lock);
8850                         set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
8851                         new_len = start - ordered->file_offset;
8852                         if (new_len < ordered->truncated_len)
8853                                 ordered->truncated_len = new_len;
8854                         spin_unlock_irq(&tree->lock);
8855
8856                         if (btrfs_dec_test_ordered_pending(inode, &ordered,
8857                                                            start,
8858                                                            end - start + 1, 1))
8859                                 btrfs_finish_ordered_io(ordered);
8860                 }
8861                 btrfs_put_ordered_extent(ordered);
8862                 if (!inode_evicting) {
8863                         cached_state = NULL;
8864                         lock_extent_bits(tree, start, end,
8865                                          &cached_state);
8866                 }
8867
8868                 start = end + 1;
8869                 if (start < page_end)
8870                         goto again;
8871         }
8872
8873         /*
8874          * Qgroup reserved space handler
8875          * Page here will be either
8876          * 1) Already written to disk
8877          *    In this case, its reserved space is released from the data
8878          *    rsv map and will eventually be freed by the delayed ref
8879          *    handler. So even if we call qgroup_free_data(), it won't
8880          *    decrease the reserved space.
8881          * 2) Not written to disk
8882          *    This means the reserved space should be freed here.
8883          */
8884         btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE);
8885         if (!inode_evicting) {
8886                 clear_extent_bit(tree, page_start, page_end,
8887                                  EXTENT_LOCKED | EXTENT_DIRTY |
8888                                  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
8889                                  EXTENT_DEFRAG, 1, 1,
8890                                  &cached_state, GFP_NOFS);
8891
8892                 __btrfs_releasepage(page, GFP_NOFS);
8893         }
8894
8895         ClearPageChecked(page);
8896         if (PagePrivate(page)) {
8897                 ClearPagePrivate(page);
8898                 set_page_private(page, 0);
8899                 put_page(page);
8900         }
8901 }
8902
8903 /*
8904  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
8905  * called from a page fault handler when a page is first dirtied. Hence we must
8906  * be careful to check for EOF conditions here. We set the page up correctly
8907  * for a written page which means we get ENOSPC checking when writing into
8908  * holes and correct delalloc and unwritten extent mapping on filesystems that
8909  * support these features.
8910  *
8911  * We are not allowed to take the i_mutex here so we have to play games to
8912  * protect against truncate races as the page could now be beyond EOF.  Because
8913  * vmtruncate() writes the inode size before removing pages, once we have the
8914  * page lock we can determine safely if the page is beyond EOF. If it is not
8915  * beyond EOF, then the page is guaranteed safe against truncation until we
8916  * unlock the page.
8917  */
8918 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
8919 {
8920         struct page *page = vmf->page;
8921         struct inode *inode = file_inode(vma->vm_file);
8922         struct btrfs_root *root = BTRFS_I(inode)->root;
8923         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
8924         struct btrfs_ordered_extent *ordered;
8925         struct extent_state *cached_state = NULL;
8926         char *kaddr;
8927         unsigned long zero_start;
8928         loff_t size;
8929         int ret;
8930         int reserved = 0;
8931         u64 reserved_space;
8932         u64 page_start;
8933         u64 page_end;
8934         u64 end;
8935
8936         reserved_space = PAGE_SIZE;
8937
8938         sb_start_pagefault(inode->i_sb);
8939         page_start = page_offset(page);
8940         page_end = page_start + PAGE_SIZE - 1;
8941         end = page_end;
8942
8943         /*
8944          * Reserving delalloc space after obtaining the page lock can lead to
8945          * deadlock. For example, if a dirty page is locked by this function
8946          * and the call to btrfs_delalloc_reserve_space() ends up triggering
8947          * dirty page write out, then the btrfs_writepage() function could
8948          * end up waiting indefinitely to get a lock on the page currently
8949          * being processed by btrfs_page_mkwrite() function.
8950          */
8951         ret = btrfs_delalloc_reserve_space(inode, page_start,
8952                                            reserved_space);
8953         if (!ret) {
8954                 ret = file_update_time(vma->vm_file);
8955                 reserved = 1;
8956         }
8957         if (ret) {
8958                 if (ret == -ENOMEM)
8959                         ret = VM_FAULT_OOM;
8960                 else /* -ENOSPC, -EIO, etc */
8961                         ret = VM_FAULT_SIGBUS;
8962                 if (reserved)
8963                         goto out;
8964                 goto out_noreserve;
8965         }
8966
8967         ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
8968 again:
8969         lock_page(page);
8970         size = i_size_read(inode);
8971
8972         if ((page->mapping != inode->i_mapping) ||
8973             (page_start >= size)) {
8974                 /* page got truncated out from underneath us */
8975                 goto out_unlock;
8976         }
8977         wait_on_page_writeback(page);
8978
8979         lock_extent_bits(io_tree, page_start, page_end, &cached_state);
8980         set_page_extent_mapped(page);
8981
8982         /*
8983          * we can't set the delalloc bits if there are pending ordered
8984          * extents.  Drop our locks and wait for them to finish
8985          */
8986         ordered = btrfs_lookup_ordered_range(inode, page_start, page_end);
8987         if (ordered) {
8988                 unlock_extent_cached(io_tree, page_start, page_end,
8989                                      &cached_state, GFP_NOFS);
8990                 unlock_page(page);
8991                 btrfs_start_ordered_extent(inode, ordered, 1);
8992                 btrfs_put_ordered_extent(ordered);
8993                 goto again;
8994         }
8995
8996         if (page->index == ((size - 1) >> PAGE_SHIFT)) {
8997                 reserved_space = round_up(size - page_start, root->sectorsize);
8998                 if (reserved_space < PAGE_SIZE) {
8999                         end = page_start + reserved_space - 1;
9000                         spin_lock(&BTRFS_I(inode)->lock);
9001                         BTRFS_I(inode)->outstanding_extents++;
9002                         spin_unlock(&BTRFS_I(inode)->lock);
9003                         btrfs_delalloc_release_space(inode, page_start,
9004                                                 PAGE_SIZE - reserved_space);
9005                 }
9006         }
9007
9008         /*
9009          * XXX - page_mkwrite gets called every time the page is dirtied, even
9010          * if it was already dirty, so for space accounting reasons we need to
9011          * clear any delalloc bits for the range we are fixing to save.  There
9012          * is probably a better way to do this, but for now keep consistent with
9013          * prepare_pages in the normal write path.
9014          */
9015         clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
9016                           EXTENT_DIRTY | EXTENT_DELALLOC |
9017                           EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
9018                           0, 0, &cached_state, GFP_NOFS);
9019
9020         ret = btrfs_set_extent_delalloc(inode, page_start, end,
9021                                         &cached_state);
9022         if (ret) {
9023                 unlock_extent_cached(io_tree, page_start, page_end,
9024                                      &cached_state, GFP_NOFS);
9025                 ret = VM_FAULT_SIGBUS;
9026                 goto out_unlock;
9027         }
9028         ret = 0;
9029
9030         /* page is wholly or partially inside EOF */
9031         if (page_start + PAGE_SIZE > size)
9032                 zero_start = size & ~PAGE_MASK;
9033         else
9034                 zero_start = PAGE_SIZE;
9035
9036         if (zero_start != PAGE_SIZE) {
9037                 kaddr = kmap(page);
9038                 memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start);
9039                 flush_dcache_page(page);
9040                 kunmap(page);
9041         }
9042         ClearPageChecked(page);
9043         set_page_dirty(page);
9044         SetPageUptodate(page);
9045
9046         BTRFS_I(inode)->last_trans = root->fs_info->generation;
9047         BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
9048         BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
9049
9050         unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
9051
9052 out_unlock:
9053         if (!ret) {
9054                 sb_end_pagefault(inode->i_sb);
9055                 return VM_FAULT_LOCKED;
9056         }
9057         unlock_page(page);
9058 out:
9059         btrfs_delalloc_release_space(inode, page_start, reserved_space);
9060 out_noreserve:
9061         sb_end_pagefault(inode->i_sb);
9062         return ret;
9063 }
9064
9065 static int btrfs_truncate(struct inode *inode)
9066 {
9067         struct btrfs_root *root = BTRFS_I(inode)->root;
9068         struct btrfs_block_rsv *rsv;
9069         int ret = 0;
9070         int err = 0;
9071         struct btrfs_trans_handle *trans;
9072         u64 mask = root->sectorsize - 1;
9073         u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
9074
9075         ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
9076                                        (u64)-1);
9077         if (ret)
9078                 return ret;
9079
9080         /*
9081          * Yes ladies and gentlemen, this is indeed ugly.  The fact is we have
9082          * 3 things going on here
9083          *
9084          * 1) We need to reserve space for our orphan item and the space to
9085          * delete our orphan item.  Lord knows we don't want to have a dangling
9086          * orphan item because we didn't reserve space to remove it.
9087          *
9088          * 2) We need to reserve space to update our inode.
9089          *
9090          * 3) We need to have something to cache all the space that is going to
9091          * be freed up by the truncate operation, but also have some slack
9092          * space reserved in case it uses space during the truncate (thank you
9093          * very much snapshotting).
9094          *
9095          * And we need these to all be separate.  The fact is we can use a lot of
9096          * space doing the truncate, and we have no earthly idea how much space
9097          * we will use, so we need the truncate reservation to be separate so it
9098          * doesn't end up using space reserved for updating the inode or
9099          * removing the orphan item.  We also need to be able to stop the
9100          * transaction and start a new one, which means we need to be able to
9101          * update the inode several times, and we have no way of knowing how
9102          * many times that will be, so we can't just reserve 1 item for the
9103          * entirety of the operation, so that has to be done separately as well.
9104          * Then there is the orphan item, which does indeed need to be held on
9105          * to for the whole operation, and we need nobody to touch this reserved
9106          * space except the orphan code.
9107          *
9108          * So that leaves us with
9109          *
9110          * 1) root->orphan_block_rsv - for the orphan deletion.
9111          * 2) rsv - for the truncate reservation, which we will steal from the
9112          * transaction reservation.
9113          * 3) fs_info->trans_block_rsv - this will have 1 item's worth left for
9114          * updating the inode.
9115          */
9116         rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
9117         if (!rsv)
9118                 return -ENOMEM;
9119         rsv->size = min_size;
9120         rsv->failfast = 1;
9121
9122         /*
9123          * 1 for the truncate slack space
9124          * 1 for updating the inode.
9125          */
9126         trans = btrfs_start_transaction(root, 2);
9127         if (IS_ERR(trans)) {
9128                 err = PTR_ERR(trans);
9129                 goto out;
9130         }
9131
9132         /* Migrate the slack space for the truncate to our reserve */
9133         ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
9134                                       min_size, 0);
9135         BUG_ON(ret);
9136
9137         /*
9138          * So if we truncate and then write and fsync we normally would just
9139          * write the extents that changed, which is a problem if we need to
9140          * first truncate that entire inode.  So set this flag so we write out
9141          * all of the extents in the inode to the sync log so we're completely
9142          * safe.
9143          */
9144         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
9145         trans->block_rsv = rsv;
9146
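	/*
	 * Drop file extent items until done, refilling the truncate
	 * reservation from the transaction reservation every time
	 * btrfs_truncate_inode_items() bails out with -ENOSPC or -EAGAIN.
	 */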
9147         while (1) {
9148                 ret = btrfs_truncate_inode_items(trans, root, inode,
9149                                                  inode->i_size,
9150                                                  BTRFS_EXTENT_DATA_KEY);
9151                 if (ret != -ENOSPC && ret != -EAGAIN) {
9152                         err = ret;
9153                         break;
9154                 }
9155
9156                 trans->block_rsv = &root->fs_info->trans_block_rsv;
9157                 ret = btrfs_update_inode(trans, root, inode);
9158                 if (ret) {
9159                         err = ret;
9160                         break;
9161                 }
9162
9163                 btrfs_end_transaction(trans, root);
9164                 btrfs_btree_balance_dirty(root);
9165
9166                 trans = btrfs_start_transaction(root, 2);
9167                 if (IS_ERR(trans)) {
9168                         ret = err = PTR_ERR(trans);
9169                         trans = NULL;
9170                         break;
9171                 }
9172
9173                 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
9174                                               rsv, min_size, 0);
9175                 BUG_ON(ret);    /* shouldn't happen */
9176                 trans->block_rsv = rsv;
9177         }
9178
9179         if (ret == 0 && inode->i_nlink > 0) {
9180                 trans->block_rsv = root->orphan_block_rsv;
9181                 ret = btrfs_orphan_del(trans, inode);
9182                 if (ret)
9183                         err = ret;
9184         }
9185
9186         if (trans) {
9187                 trans->block_rsv = &root->fs_info->trans_block_rsv;
9188                 ret = btrfs_update_inode(trans, root, inode);
9189                 if (ret && !err)
9190                         err = ret;
9191
9192                 ret = btrfs_end_transaction(trans, root);
9193                 btrfs_btree_balance_dirty(root);
9194         }
9195 out:
9196         btrfs_free_block_rsv(root, rsv);
9197
9198         if (ret && !err)
9199                 err = ret;
9200
9201         return err;
9202 }
9203
9204 /*
9205  * create a new subvolume directory/inode (helper for the ioctl).
9206  */
9207 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
9208                              struct btrfs_root *new_root,
9209                              struct btrfs_root *parent_root,
9210                              u64 new_dirid)
9211 {
9212         struct inode *inode;
9213         int err;
9214         u64 index = 0;
9215
9216         inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
9217                                 new_dirid, new_dirid,
9218                                 S_IFDIR | (~current_umask() & S_IRWXUGO),
9219                                 &index);
9220         if (IS_ERR(inode))
9221                 return PTR_ERR(inode);
9222         inode->i_op = &btrfs_dir_inode_operations;
9223         inode->i_fop = &btrfs_dir_file_operations;
9224
9225         set_nlink(inode, 1);
9226         btrfs_i_size_write(inode, 0);
9227         unlock_new_inode(inode);
9228
9229         err = btrfs_subvol_inherit_props(trans, new_root, parent_root);
9230         if (err)
9231                 btrfs_err(new_root->fs_info,
9232                           "error inheriting subvolume %llu properties: %d",
9233                           new_root->root_key.objectid, err);
9234
9235         err = btrfs_update_inode(trans, new_root, inode);
9236
9237         iput(inode);
9238         return err;
9239 }
9240
9241 struct inode *btrfs_alloc_inode(struct super_block *sb)
9242 {
9243         struct btrfs_inode *ei;
9244         struct inode *inode;
9245
9246         ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
9247         if (!ei)
9248                 return NULL;
9249
9250         ei->root = NULL;
9251         ei->generation = 0;
9252         ei->last_trans = 0;
9253         ei->last_sub_trans = 0;
9254         ei->logged_trans = 0;
9255         ei->delalloc_bytes = 0;
9256         ei->defrag_bytes = 0;
9257         ei->disk_i_size = 0;
9258         ei->flags = 0;
9259         ei->csum_bytes = 0;
9260         ei->index_cnt = (u64)-1;
9261         ei->dir_index = 0;
9262         ei->last_unlink_trans = 0;
9263         ei->last_log_commit = 0;
9264         ei->delayed_iput_count = 0;
9265
9266         spin_lock_init(&ei->lock);
9267         ei->outstanding_extents = 0;
9268         ei->reserved_extents = 0;
9269
9270         ei->runtime_flags = 0;
9271         ei->force_compress = BTRFS_COMPRESS_NONE;
9272
9273         ei->delayed_node = NULL;
9274
9275         ei->i_otime.tv_sec = 0;
9276         ei->i_otime.tv_nsec = 0;
9277
9278         inode = &ei->vfs_inode;
9279         extent_map_tree_init(&ei->extent_tree);
9280         extent_io_tree_init(&ei->io_tree, &inode->i_data);
9281         extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
9282         ei->io_tree.track_uptodate = 1;
9283         ei->io_failure_tree.track_uptodate = 1;
9284         atomic_set(&ei->sync_writers, 0);
9285         mutex_init(&ei->log_mutex);
9286         mutex_init(&ei->delalloc_mutex);
9287         btrfs_ordered_inode_tree_init(&ei->ordered_tree);
9288         INIT_LIST_HEAD(&ei->delalloc_inodes);
9289         INIT_LIST_HEAD(&ei->delayed_iput);
9290         RB_CLEAR_NODE(&ei->rb_node);
9291         init_rwsem(&ei->dio_sem);
9292
9293         return inode;
9294 }
9295
9296 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
9297 void btrfs_test_destroy_inode(struct inode *inode)
9298 {
9299         btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
9300         kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
9301 }
9302 #endif
9303
9304 static void btrfs_i_callback(struct rcu_head *head)
9305 {
9306         struct inode *inode = container_of(head, struct inode, i_rcu);
9307         kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
9308 }
9309
9310 void btrfs_destroy_inode(struct inode *inode)
9311 {
9312         struct btrfs_ordered_extent *ordered;
9313         struct btrfs_root *root = BTRFS_I(inode)->root;
9314
9315         WARN_ON(!hlist_empty(&inode->i_dentry));
9316         WARN_ON(inode->i_data.nrpages);
9317         WARN_ON(BTRFS_I(inode)->outstanding_extents);
9318         WARN_ON(BTRFS_I(inode)->reserved_extents);
9319         WARN_ON(BTRFS_I(inode)->delalloc_bytes);
9320         WARN_ON(BTRFS_I(inode)->csum_bytes);
9321         WARN_ON(BTRFS_I(inode)->defrag_bytes);
9322
9323         /*
9324          * This can happen when we create an inode, but somebody else also
9325          * created the same inode and we need to destroy the one we already
9326          * created.
9327          */
9328         if (!root)
9329                 goto free;
9330
9331         if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
9332                      &BTRFS_I(inode)->runtime_flags)) {
9333                 btrfs_info(root->fs_info, "inode %llu still on the orphan list",
9334                         btrfs_ino(inode));
9335                 atomic_dec(&root->orphan_inodes);
9336         }
9337
9338         while (1) {
9339                 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
9340                 if (!ordered)
9341                         break;
9342                 else {
9343                         btrfs_err(root->fs_info, "found ordered extent %llu %llu on inode cleanup",
9344                                 ordered->file_offset, ordered->len);
9345                         btrfs_remove_ordered_extent(inode, ordered);
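                        /*
                         * Put twice: once for the lookup reference above and
                         * once for the reference IO completion would normally
                         * drop, since btrfs_finish_ordered_io() will never run.
                         */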
9346                         btrfs_put_ordered_extent(ordered);
9347                         btrfs_put_ordered_extent(ordered);
9348                 }
9349         }
9350         btrfs_qgroup_check_reserved_leak(inode);
9351         inode_tree_del(inode);
9352         btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
9353 free:
9354         call_rcu(&inode->i_rcu, btrfs_i_callback);
9355 }
9356
9357 int btrfs_drop_inode(struct inode *inode)
9358 {
9359         struct btrfs_root *root = BTRFS_I(inode)->root;
9360
9361         if (root == NULL)
9362                 return 1;
9363
9364         /* the snap/subvol tree is being deleted */
9365         if (btrfs_root_refs(&root->root_item) == 0)
9366                 return 1;
9367         else
9368                 return generic_drop_inode(inode);
9369 }
9370
9371 static void init_once(void *foo)
9372 {
9373         struct btrfs_inode *ei = (struct btrfs_inode *) foo;
9374
9375         inode_init_once(&ei->vfs_inode);
9376 }
9377
9378 void btrfs_destroy_cachep(void)
9379 {
9380         /*
9381          * Make sure all delayed rcu free inodes are flushed before we
9382          * destroy cache.
9383          */
9384         rcu_barrier();
9385         kmem_cache_destroy(btrfs_inode_cachep);
9386         kmem_cache_destroy(btrfs_trans_handle_cachep);
9387         kmem_cache_destroy(btrfs_transaction_cachep);
9388         kmem_cache_destroy(btrfs_path_cachep);
9389         kmem_cache_destroy(btrfs_free_space_cachep);
9390 }
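/*
 * Note on the ordering above: rcu_barrier(), unlike synchronize_rcu(),
 * waits for all pending call_rcu(..., btrfs_i_callback) callbacks to
 * actually run, so no RCU-deferred free can land in btrfs_inode_cachep
 * after the cache is destroyed.  kmem_cache_destroy() is a no-op for a
 * NULL pointer, which is why the failure path of btrfs_init_cachep()
 * below can call this function with only some of the caches created.
 */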
9391
9392 int btrfs_init_cachep(void)
9393 {
9394         btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
9395                         sizeof(struct btrfs_inode), 0,
9396                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
9397                         init_once);
9398         if (!btrfs_inode_cachep)
9399                 goto fail;
9400
9401         btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
9402                         sizeof(struct btrfs_trans_handle), 0,
9403                         SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
9404         if (!btrfs_trans_handle_cachep)
9405                 goto fail;
9406
9407         btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction",
9408                         sizeof(struct btrfs_transaction), 0,
9409                         SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
9410         if (!btrfs_transaction_cachep)
9411                 goto fail;
9412
9413         btrfs_path_cachep = kmem_cache_create("btrfs_path",
9414                         sizeof(struct btrfs_path), 0,
9415                         SLAB_MEM_SPREAD, NULL);
9416         if (!btrfs_path_cachep)
9417                 goto fail;
9418
9419         btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
9420                         sizeof(struct btrfs_free_space), 0,
9421                         SLAB_MEM_SPREAD, NULL);
9422         if (!btrfs_free_space_cachep)
9423                 goto fail;
9424
9425         return 0;
9426 fail:
9427         btrfs_destroy_cachep();
9428         return -ENOMEM;
9429 }
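/*
 * A minimal sketch of the expected calling convention for the pair
 * above (example_init is hypothetical, not the real module init):
 *
 *	static int __init example_init(void)
 *	{
 *		int err = btrfs_init_cachep();
 *
 *		if (err)
 *			return err;	(caches already torn down)
 *		...
 *	}
 *
 * On any allocation failure btrfs_init_cachep() cleans up everything it
 * created via btrfs_destroy_cachep() and returns -ENOMEM, so callers
 * never need partial-cleanup logic of their own.
 */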
9430
9431 static int btrfs_getattr(struct vfsmount *mnt,
9432                          struct dentry *dentry, struct kstat *stat)
9433 {
9434         u64 delalloc_bytes;
9435         struct inode *inode = d_inode(dentry);
9436         u32 blocksize = inode->i_sb->s_blocksize;
9437
9438         generic_fillattr(inode, stat);
9439         stat->dev = BTRFS_I(inode)->root->anon_dev;
9440
9441         spin_lock(&BTRFS_I(inode)->lock);
9442         delalloc_bytes = BTRFS_I(inode)->delalloc_bytes;
9443         spin_unlock(&BTRFS_I(inode)->lock);
9444         stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
9445                         ALIGN(delalloc_bytes, blocksize)) >> 9;
9446         return 0;
9447 }
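/*
 * Worked example for the stat->blocks computation above: st_blocks is
 * reported in 512-byte sectors and includes not-yet-flushed delalloc.
 * With a 4K block size, 6000 bytes on disk plus 100 dirty delalloc
 * bytes align up to 8192 + 4096 = 12288 bytes, so
 * stat->blocks = 12288 >> 9 = 24.
 */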
9448
9449 static int btrfs_rename_exchange(struct inode *old_dir,
9450                               struct dentry *old_dentry,
9451                               struct inode *new_dir,
9452                               struct dentry *new_dentry)
9453 {
9454         struct btrfs_trans_handle *trans;
9455         struct btrfs_root *root = BTRFS_I(old_dir)->root;
9456         struct btrfs_root *dest = BTRFS_I(new_dir)->root;
9457         struct inode *new_inode = new_dentry->d_inode;
9458         struct inode *old_inode = old_dentry->d_inode;
9459         struct timespec ctime = CURRENT_TIME;
9460         struct dentry *parent;
9461         u64 old_ino = btrfs_ino(old_inode);
9462         u64 new_ino = btrfs_ino(new_inode);
9463         u64 old_idx = 0;
9464         u64 new_idx = 0;
9465         u64 root_objectid;
9466         int ret;
9467         bool root_log_pinned = false;
9468         bool dest_log_pinned = false;
9469
9470         /* we only allow renaming subvolume links between subvolumes */
9471         if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
9472                 return -EXDEV;
9473
9474         /* close the race window with snapshot create/destroy ioctl */
9475         if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9476                 down_read(&root->fs_info->subvol_sem);
9477         if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
9478                 down_read(&dest->fs_info->subvol_sem);
9479
9480         /*
9481          * We want to reserve the absolute worst case amount of items.  So if
9482          * both inodes are subvols and we need to unlink them then that would
9483          * require 4 item modifications, but if they are both normal inodes it
9484          * would require 5 item modifications, so we'll assume they are normal
9485          * inodes.  So 5 * 2 is 10, plus 2 for the new links, so 12 total items
9486          * should cover the worst case number of items we'll modify.
9487          */
9488         trans = btrfs_start_transaction(root, 12);
9489         if (IS_ERR(trans)) {
9490                 ret = PTR_ERR(trans);
9491                 goto out_notrans;
9492         }
9493
9494         /*
9495          * We need to find a free sequence number both in the source and
9496          * in the destination directory for the exchange.
9497          */
9498         ret = btrfs_set_inode_index(new_dir, &old_idx);
9499         if (ret)
9500                 goto out_fail;
9501         ret = btrfs_set_inode_index(old_dir, &new_idx);
9502         if (ret)
9503                 goto out_fail;
9504
9505         BTRFS_I(old_inode)->dir_index = 0ULL;
9506         BTRFS_I(new_inode)->dir_index = 0ULL;
9507
9508         /* Reference for the source. */
9509         if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9510                 /* force full log commit if subvolume involved. */
9511                 btrfs_set_log_full_commit(root->fs_info, trans);
9512         } else {
9513                 btrfs_pin_log_trans(root);
9514                 root_log_pinned = true;
9515                 ret = btrfs_insert_inode_ref(trans, dest,
9516                                              new_dentry->d_name.name,
9517                                              new_dentry->d_name.len,
9518                                              old_ino,
9519                                              btrfs_ino(new_dir), old_idx);
9520                 if (ret)
9521                         goto out_fail;
9522         }
9523
9524         /* And now for the dest. */
9525         if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
9526                 /* force full log commit if subvolume involved. */
9527                 btrfs_set_log_full_commit(dest->fs_info, trans);
9528         } else {
9529                 btrfs_pin_log_trans(dest);
9530                 dest_log_pinned = true;
9531                 ret = btrfs_insert_inode_ref(trans, root,
9532                                              old_dentry->d_name.name,
9533                                              old_dentry->d_name.len,
9534                                              new_ino,
9535                                              btrfs_ino(old_dir), new_idx);
9536                 if (ret)
9537                         goto out_fail;
9538         }
9539
9540         /* Update inode version and ctime/mtime. */
9541         inode_inc_iversion(old_dir);
9542         inode_inc_iversion(new_dir);
9543         inode_inc_iversion(old_inode);
9544         inode_inc_iversion(new_inode);
9545         old_dir->i_ctime = old_dir->i_mtime = ctime;
9546         new_dir->i_ctime = new_dir->i_mtime = ctime;
9547         old_inode->i_ctime = ctime;
9548         new_inode->i_ctime = ctime;
9549
9550         if (old_dentry->d_parent != new_dentry->d_parent) {
9551                 btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
9552                 btrfs_record_unlink_dir(trans, new_dir, new_inode, 1);
9553         }
9554
9555         /* src is a subvolume */
9556         if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
9557                 root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
9558                 ret = btrfs_unlink_subvol(trans, root, old_dir,
9559                                           root_objectid,
9560                                           old_dentry->d_name.name,
9561                                           old_dentry->d_name.len);
9562         } else { /* src is an inode */
9563                 ret = __btrfs_unlink_inode(trans, root, old_dir,
9564                                            old_dentry->d_inode,
9565                                            old_dentry->d_name.name,
9566                                            old_dentry->d_name.len);
9567                 if (!ret)
9568                         ret = btrfs_update_inode(trans, root, old_inode);
9569         }
9570         if (ret) {
9571                 btrfs_abort_transaction(trans, root, ret);
9572                 goto out_fail;
9573         }
9574
9575         /* dest is a subvolume */
9576         if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
9577                 root_objectid = BTRFS_I(new_inode)->root->root_key.objectid;
9578                 ret = btrfs_unlink_subvol(trans, dest, new_dir,
9579                                           root_objectid,
9580                                           new_dentry->d_name.name,
9581                                           new_dentry->d_name.len);
9582         } else { /* dest is an inode */
9583                 ret = __btrfs_unlink_inode(trans, dest, new_dir,
9584                                            new_dentry->d_inode,
9585                                            new_dentry->d_name.name,
9586                                            new_dentry->d_name.len);
9587                 if (!ret)
9588                         ret = btrfs_update_inode(trans, dest, new_inode);
9589         }
9590         if (ret) {
9591                 btrfs_abort_transaction(trans, root, ret);
9592                 goto out_fail;
9593         }
9594
9595         ret = btrfs_add_link(trans, new_dir, old_inode,
9596                              new_dentry->d_name.name,
9597                              new_dentry->d_name.len, 0, old_idx);
9598         if (ret) {
9599                 btrfs_abort_transaction(trans, root, ret);
9600                 goto out_fail;
9601         }
9602
9603         ret = btrfs_add_link(trans, old_dir, new_inode,
9604                              old_dentry->d_name.name,
9605                              old_dentry->d_name.len, 0, new_idx);
9606         if (ret) {
9607                 btrfs_abort_transaction(trans, root, ret);
9608                 goto out_fail;
9609         }
9610
9611         if (old_inode->i_nlink == 1)
9612                 BTRFS_I(old_inode)->dir_index = old_idx;
9613         if (new_inode->i_nlink == 1)
9614                 BTRFS_I(new_inode)->dir_index = new_idx;
9615
9616         if (root_log_pinned) {
9617                 parent = new_dentry->d_parent;
9618                 btrfs_log_new_name(trans, old_inode, old_dir, parent);
9619                 btrfs_end_log_trans(root);
9620                 root_log_pinned = false;
9621         }
9622         if (dest_log_pinned) {
9623                 parent = old_dentry->d_parent;
9624                 btrfs_log_new_name(trans, new_inode, new_dir, parent);
9625                 btrfs_end_log_trans(dest);
9626                 dest_log_pinned = false;
9627         }
9628 out_fail:
9629         /*
9630          * If we have pinned a log and an error happened, we unpin tasks
9631          * trying to sync the log and force them to fall back to a transaction
9632          * commit if the log currently contains any of the inodes involved in
9633          * this rename operation (to ensure we do not persist a log with an
9634          * inconsistent state for any of these inodes, which could lead to
9635          * inconsistencies when replayed). If the transaction was aborted, the
9636          * abort reason is propagated to userspace when attempting to commit
9637          * the transaction. If the log does not contain any of these inodes, we
9638          * allow the tasks to sync it.
9639          */
9640         if (ret && (root_log_pinned || dest_log_pinned)) {
9641                 if (btrfs_inode_in_log(old_dir, root->fs_info->generation) ||
9642                     btrfs_inode_in_log(new_dir, root->fs_info->generation) ||
9643                     btrfs_inode_in_log(old_inode, root->fs_info->generation) ||
9644                     (new_inode &&
9645                      btrfs_inode_in_log(new_inode, root->fs_info->generation)))
9646                     btrfs_set_log_full_commit(root->fs_info, trans);
9647
9648                 if (root_log_pinned) {
9649                         btrfs_end_log_trans(root);
9650                         root_log_pinned = false;
9651                 }
9652                 if (dest_log_pinned) {
9653                         btrfs_end_log_trans(dest);
9654                         dest_log_pinned = false;
9655                 }
9656         }
9657         ret = btrfs_end_transaction(trans, root);
9658 out_notrans:
9659         if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
9660                 up_read(&dest->fs_info->subvol_sem);
9661         if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9662                 up_read(&root->fs_info->subvol_sem);
9663
9664         return ret;
9665 }
9666
9667 static int btrfs_whiteout_for_rename(struct btrfs_trans_handle *trans,
9668                                      struct btrfs_root *root,
9669                                      struct inode *dir,
9670                                      struct dentry *dentry)
9671 {
9672         int ret;
9673         struct inode *inode;
9674         u64 objectid;
9675         u64 index;
9676
9677         ret = btrfs_find_free_ino(root, &objectid);
9678         if (ret)
9679                 return ret;
9680
9681         inode = btrfs_new_inode(trans, root, dir,
9682                                 dentry->d_name.name,
9683                                 dentry->d_name.len,
9684                                 btrfs_ino(dir),
9685                                 objectid,
9686                                 S_IFCHR | WHITEOUT_MODE,
9687                                 &index);
9688
9689         if (IS_ERR(inode)) {
9690                 ret = PTR_ERR(inode);
9691                 return ret;
9692         }
9693
9694         inode->i_op = &btrfs_special_inode_operations;
9695         init_special_inode(inode, inode->i_mode,
9696                 WHITEOUT_DEV);
9697
9698         ret = btrfs_init_inode_security(trans, inode, dir,
9699                                 &dentry->d_name);
9700         if (ret)
9701                 goto out;
9702
9703         ret = btrfs_add_nondir(trans, dir, dentry,
9704                                 inode, 0, index);
9705         if (ret)
9706                 goto out;
9707
9708         ret = btrfs_update_inode(trans, root, inode);
9709 out:
9710         unlock_new_inode(inode);
9711         if (ret)
9712                 inode_dec_link_count(inode);
9713         iput(inode);
9714
9715         return ret;
9716 }
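/*
 * A whiteout is an ordinary character device inode with device number
 * WHITEOUT_DEV (0:0); the S_IFCHR | WHITEOUT_MODE mode used above
 * produces exactly that.  Union/overlay users create it at the old name
 * so the entry reads as "deleted" in the upper layer.  Illustrative
 * userspace trigger:
 *
 *	renameat2(AT_FDCWD, "old", AT_FDCWD, "new", RENAME_WHITEOUT);
 *
 * afterwards "new" holds the file and "old" is a 0:0 char device.
 */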
9717
9718 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
9719                            struct inode *new_dir, struct dentry *new_dentry,
9720                            unsigned int flags)
9721 {
9722         struct btrfs_trans_handle *trans;
9723         unsigned int trans_num_items;
9724         struct btrfs_root *root = BTRFS_I(old_dir)->root;
9725         struct btrfs_root *dest = BTRFS_I(new_dir)->root;
9726         struct inode *new_inode = d_inode(new_dentry);
9727         struct inode *old_inode = d_inode(old_dentry);
9728         u64 index = 0;
9729         u64 root_objectid;
9730         int ret;
9731         u64 old_ino = btrfs_ino(old_inode);
9732         bool log_pinned = false;
9733
9734         if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
9735                 return -EPERM;
9736
9737         /* we only allow renaming subvolume links between subvolumes */
9738         if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
9739                 return -EXDEV;
9740
9741         if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
9742             (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
9743                 return -ENOTEMPTY;
9744
9745         if (S_ISDIR(old_inode->i_mode) && new_inode &&
9746             new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
9747                 return -ENOTEMPTY;
9748
9749
9750         /* check for collisions, even if the name isn't there */
9751         ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
9752                              new_dentry->d_name.name,
9753                              new_dentry->d_name.len);
9754
9755         if (ret) {
9756                 if (ret == -EEXIST) {
9757                         /* we shouldn't get -EEXIST
9758                          * without a new_inode */
9759                         if (WARN_ON(!new_inode)) {
9760                                 return ret;
9761                         }
9762                 } else {
9763                         /* maybe -EOVERFLOW */
9764                         return ret;
9765                 }
9766         }
9767         ret = 0;
9768
9769         /*
9770          * we're using rename to replace one file with another.  Start IO on it
9771          * now so  we don't add too much work to the end of the transaction
9772          */
9773         if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
9774                 filemap_flush(old_inode->i_mapping);
9775
9776         /* close the race window with snapshot create/destroy ioctl */
9777         if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9778                 down_read(&root->fs_info->subvol_sem);
9779         /*
9780          * We want to reserve the absolute worst case amount of items.  So if
9781          * both inodes are subvols and we need to unlink them then that would
9782          * require 4 item modifications, but if they are both normal inodes it
9783          * would require 5 item modifications, so we'll assume they are normal
9784          * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
9785          * should cover the worst case number of items we'll modify.
9786          * If our rename has the whiteout flag, we need 5 more units for the
9787          * new inode (1 inode item, 1 inode ref, 2 dir items and 1 xattr item
9788          * when selinux is enabled).
9789          */
9790         trans_num_items = 11;
9791         if (flags & RENAME_WHITEOUT)
9792                 trans_num_items += 5;
9793         trans = btrfs_start_transaction(root, trans_num_items);
9794         if (IS_ERR(trans)) {
9795                 ret = PTR_ERR(trans);
9796                 goto out_notrans;
9797         }
9798
9799         if (dest != root)
9800                 btrfs_record_root_in_trans(trans, dest);
9801
9802         ret = btrfs_set_inode_index(new_dir, &index);
9803         if (ret)
9804                 goto out_fail;
9805
9806         BTRFS_I(old_inode)->dir_index = 0ULL;
9807         if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9808                 /* force full log commit if subvolume involved. */
9809                 btrfs_set_log_full_commit(root->fs_info, trans);
9810         } else {
9811                 btrfs_pin_log_trans(root);
9812                 log_pinned = true;
9813                 ret = btrfs_insert_inode_ref(trans, dest,
9814                                              new_dentry->d_name.name,
9815                                              new_dentry->d_name.len,
9816                                              old_ino,
9817                                              btrfs_ino(new_dir), index);
9818                 if (ret)
9819                         goto out_fail;
9820         }
9821
9822         inode_inc_iversion(old_dir);
9823         inode_inc_iversion(new_dir);
9824         inode_inc_iversion(old_inode);
9825         old_dir->i_ctime = old_dir->i_mtime =
9826         new_dir->i_ctime = new_dir->i_mtime =
9827         old_inode->i_ctime = current_fs_time(old_dir->i_sb);
9828
9829         if (old_dentry->d_parent != new_dentry->d_parent)
9830                 btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
9831
9832         if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9833                 root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
9834                 ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
9835                                         old_dentry->d_name.name,
9836                                         old_dentry->d_name.len);
9837         } else {
9838                 ret = __btrfs_unlink_inode(trans, root, old_dir,
9839                                         d_inode(old_dentry),
9840                                         old_dentry->d_name.name,
9841                                         old_dentry->d_name.len);
9842                 if (!ret)
9843                         ret = btrfs_update_inode(trans, root, old_inode);
9844         }
9845         if (ret) {
9846                 btrfs_abort_transaction(trans, root, ret);
9847                 goto out_fail;
9848         }
9849
9850         if (new_inode) {
9851                 inode_inc_iversion(new_inode);
9852                 new_inode->i_ctime = current_fs_time(new_inode->i_sb);
9853                 if (unlikely(btrfs_ino(new_inode) ==
9854                              BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
9855                         root_objectid = BTRFS_I(new_inode)->location.objectid;
9856                         ret = btrfs_unlink_subvol(trans, dest, new_dir,
9857                                                 root_objectid,
9858                                                 new_dentry->d_name.name,
9859                                                 new_dentry->d_name.len);
9860                         BUG_ON(new_inode->i_nlink == 0);
9861                 } else {
9862                         ret = btrfs_unlink_inode(trans, dest, new_dir,
9863                                                  d_inode(new_dentry),
9864                                                  new_dentry->d_name.name,
9865                                                  new_dentry->d_name.len);
9866                 }
9867                 if (!ret && new_inode->i_nlink == 0)
9868                         ret = btrfs_orphan_add(trans, d_inode(new_dentry));
9869                 if (ret) {
9870                         btrfs_abort_transaction(trans, root, ret);
9871                         goto out_fail;
9872                 }
9873         }
9874
9875         ret = btrfs_add_link(trans, new_dir, old_inode,
9876                              new_dentry->d_name.name,
9877                              new_dentry->d_name.len, 0, index);
9878         if (ret) {
9879                 btrfs_abort_transaction(trans, root, ret);
9880                 goto out_fail;
9881         }
9882
9883         if (old_inode->i_nlink == 1)
9884                 BTRFS_I(old_inode)->dir_index = index;
9885
9886         if (log_pinned) {
9887                 struct dentry *parent = new_dentry->d_parent;
9888
9889                 btrfs_log_new_name(trans, old_inode, old_dir, parent);
9890                 btrfs_end_log_trans(root);
9891                 log_pinned = false;
9892         }
9893
9894         if (flags & RENAME_WHITEOUT) {
9895                 ret = btrfs_whiteout_for_rename(trans, root, old_dir,
9896                                                 old_dentry);
9897
9898                 if (ret) {
9899                         btrfs_abort_transaction(trans, root, ret);
9900                         goto out_fail;
9901                 }
9902         }
9903 out_fail:
9904         /*
9905          * If we have pinned the log and an error happened, we unpin tasks
9906          * trying to sync the log and force them to fall back to a transaction
9907          * commit if the log currently contains any of the inodes involved in
9908          * this rename operation (to ensure we do not persist a log with an
9909          * inconsistent state for any of these inodes, which could lead to
9910          * inconsistencies when replayed). If the transaction was aborted, the
9911          * abort reason is propagated to userspace when attempting to commit
9912          * the transaction. If the log does not contain any of these inodes, we
9913          * allow the tasks to sync it.
9914          */
9915         if (ret && log_pinned) {
9916                 if (btrfs_inode_in_log(old_dir, root->fs_info->generation) ||
9917                     btrfs_inode_in_log(new_dir, root->fs_info->generation) ||
9918                     btrfs_inode_in_log(old_inode, root->fs_info->generation) ||
9919                     (new_inode &&
9920                      btrfs_inode_in_log(new_inode, root->fs_info->generation)))
9921                     btrfs_set_log_full_commit(root->fs_info, trans);
9922
9923                 btrfs_end_log_trans(root);
9924                 log_pinned = false;
9925         }
9926         btrfs_end_transaction(trans, root);
9927 out_notrans:
9928         if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9929                 up_read(&root->fs_info->subvol_sem);
9930
9931         return ret;
9932 }
9933
9934 static int btrfs_rename2(struct inode *old_dir, struct dentry *old_dentry,
9935                          struct inode *new_dir, struct dentry *new_dentry,
9936                          unsigned int flags)
9937 {
9938         if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
9939                 return -EINVAL;
9940
9941         if (flags & RENAME_EXCHANGE)
9942                 return btrfs_rename_exchange(old_dir, old_dentry, new_dir,
9943                                           new_dentry);
9944
9945         return btrfs_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
9946 }
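/*
 * Dispatch summary for ->rename2, seen from userspace (illustrative):
 *
 *	renameat2(fd, "a", fd, "b", 0)                -> btrfs_rename()
 *	renameat2(fd, "a", fd, "b", RENAME_NOREPLACE) -> btrfs_rename()
 *	renameat2(fd, "a", fd, "b", RENAME_WHITEOUT)  -> btrfs_rename()
 *	renameat2(fd, "a", fd, "b", RENAME_EXCHANGE)  -> btrfs_rename_exchange()
 *
 * Any other flag bit fails with -EINVAL before a transaction is started.
 */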
9947
9948 static void btrfs_run_delalloc_work(struct btrfs_work *work)
9949 {
9950         struct btrfs_delalloc_work *delalloc_work;
9951         struct inode *inode;
9952
9953         delalloc_work = container_of(work, struct btrfs_delalloc_work,
9954                                      work);
9955         inode = delalloc_work->inode;
9956         filemap_flush(inode->i_mapping);
9957         if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
9958                                 &BTRFS_I(inode)->runtime_flags))
9959                 filemap_flush(inode->i_mapping);
9960
9961         if (delalloc_work->delay_iput)
9962                 btrfs_add_delayed_iput(inode);
9963         else
9964                 iput(inode);
9965         complete(&delalloc_work->completion);
9966 }
9967
9968 struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
9969                                                     int delay_iput)
9970 {
9971         struct btrfs_delalloc_work *work;
9972
9973         work = kmalloc(sizeof(*work), GFP_NOFS);
9974         if (!work)
9975                 return NULL;
9976
9977         init_completion(&work->completion);
9978         INIT_LIST_HEAD(&work->list);
9979         work->inode = inode;
9980         work->delay_iput = delay_iput;
9981         WARN_ON_ONCE(!inode);
9982         btrfs_init_work(&work->work, btrfs_flush_delalloc_helper,
9983                         btrfs_run_delalloc_work, NULL, NULL);
9984
9985         return work;
9986 }
9987
9988 void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)
9989 {
9990         wait_for_completion(&work->completion);
9991         kfree(work);
9992 }
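/*
 * Typical lifecycle of a delalloc work item, as driven by
 * __start_delalloc_inodes() below:
 *
 *	work = btrfs_alloc_delalloc_work(inode, delay_iput);
 *	btrfs_queue_work(fs_info->flush_workers, &work->work);
 *		(worker runs btrfs_run_delalloc_work)
 *	btrfs_wait_and_free_delalloc_work(work);
 *
 * The worker owns the inode reference and drops it, via iput() or a
 * delayed iput, before signalling ->completion, so the waiter only has
 * to free the work struct.
 */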
9993
9994 /*
9995  * Some fairly slow code that needs optimization. This walks the list
9996  * of all the inodes with pending delalloc and forces them to disk.
9997  */
9998 static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput,
9999                                    int nr)
10000 {
10001         struct btrfs_inode *binode;
10002         struct inode *inode;
10003         struct btrfs_delalloc_work *work, *next;
10004         struct list_head works;
10005         struct list_head splice;
10006         int ret = 0;
10007
10008         INIT_LIST_HEAD(&works);
10009         INIT_LIST_HEAD(&splice);
10010
10011         mutex_lock(&root->delalloc_mutex);
10012         spin_lock(&root->delalloc_lock);
10013         list_splice_init(&root->delalloc_inodes, &splice);
10014         while (!list_empty(&splice)) {
10015                 binode = list_entry(splice.next, struct btrfs_inode,
10016                                     delalloc_inodes);
10017
10018                 list_move_tail(&binode->delalloc_inodes,
10019                                &root->delalloc_inodes);
10020                 inode = igrab(&binode->vfs_inode);
10021                 if (!inode) {
10022                         cond_resched_lock(&root->delalloc_lock);
10023                         continue;
10024                 }
10025                 spin_unlock(&root->delalloc_lock);
10026
10027                 work = btrfs_alloc_delalloc_work(inode, delay_iput);
10028                 if (!work) {
10029                         if (delay_iput)
10030                                 btrfs_add_delayed_iput(inode);
10031                         else
10032                                 iput(inode);
10033                         ret = -ENOMEM;
10034                         goto out;
10035                 }
10036                 list_add_tail(&work->list, &works);
10037                 btrfs_queue_work(root->fs_info->flush_workers,
10038                                  &work->work);
10039                 ret++;
10040                 if (nr != -1 && ret >= nr)
10041                         goto out;
10042                 cond_resched();
10043                 spin_lock(&root->delalloc_lock);
10044         }
10045         spin_unlock(&root->delalloc_lock);
10046
10047 out:
10048         list_for_each_entry_safe(work, next, &works, list) {
10049                 list_del_init(&work->list);
10050                 btrfs_wait_and_free_delalloc_work(work);
10051         }
10052
10053         if (!list_empty_careful(&splice)) {
10054                 spin_lock(&root->delalloc_lock);
10055                 list_splice_tail(&splice, &root->delalloc_inodes);
10056                 spin_unlock(&root->delalloc_lock);
10057         }
10058         mutex_unlock(&root->delalloc_mutex);
10059         return ret;
10060 }
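/*
 * The list handling above is the usual splice-under-lock idiom: the
 * shared delalloc list is moved wholesale onto a private on-stack list
 * so the spinlock can be dropped while each inode is processed, and any
 * unprocessed tail is spliced back on exit.  Reduced to its shape:
 *
 *	spin_lock(&lock);
 *	list_splice_init(&shared, &splice);
 *	while (!list_empty(&splice)) {
 *		entry = list_entry(splice.next, ...);
 *		list_move_tail(&entry->member, &shared);
 *		spin_unlock(&lock);
 *		(do work that may sleep)
 *		spin_lock(&lock);
 *	}
 *	spin_unlock(&lock);
 */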
10061
10062 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
10063 {
10064         int ret;
10065
10066         if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
10067                 return -EROFS;
10068
10069         ret = __start_delalloc_inodes(root, delay_iput, -1);
10070         if (ret > 0)
10071                 ret = 0;
10072         /*
10073          * the filemap_flush will queue IO into the worker threads, but
10074          * we have to make sure the IO is actually started and that
10075          * ordered extents get created before we return
10076          */
10077         atomic_inc(&root->fs_info->async_submit_draining);
10078         while (atomic_read(&root->fs_info->nr_async_submits) ||
10079               atomic_read(&root->fs_info->async_delalloc_pages)) {
10080                 wait_event(root->fs_info->async_submit_wait,
10081                    (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
10082                     atomic_read(&root->fs_info->async_delalloc_pages) == 0));
10083         }
10084         atomic_dec(&root->fs_info->async_submit_draining);
10085         return ret;
10086 }
10087
10088 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput,
10089                                int nr)
10090 {
10091         struct btrfs_root *root;
10092         struct list_head splice;
10093         int ret;
10094
10095         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
10096                 return -EROFS;
10097
10098         INIT_LIST_HEAD(&splice);
10099
10100         mutex_lock(&fs_info->delalloc_root_mutex);
10101         spin_lock(&fs_info->delalloc_root_lock);
10102         list_splice_init(&fs_info->delalloc_roots, &splice);
10103         while (!list_empty(&splice) && nr) {
10104                 root = list_first_entry(&splice, struct btrfs_root,
10105                                         delalloc_root);
10106                 root = btrfs_grab_fs_root(root);
10107                 BUG_ON(!root);
10108                 list_move_tail(&root->delalloc_root,
10109                                &fs_info->delalloc_roots);
10110                 spin_unlock(&fs_info->delalloc_root_lock);
10111
10112                 ret = __start_delalloc_inodes(root, delay_iput, nr);
10113                 btrfs_put_fs_root(root);
10114                 if (ret < 0)
10115                         goto out;
10116
10117                 if (nr != -1) {
10118                         nr -= ret;
10119                         WARN_ON(nr < 0);
10120                 }
10121                 spin_lock(&fs_info->delalloc_root_lock);
10122         }
10123         spin_unlock(&fs_info->delalloc_root_lock);
10124
10125         ret = 0;
10126         atomic_inc(&fs_info->async_submit_draining);
10127         while (atomic_read(&fs_info->nr_async_submits) ||
10128               atomic_read(&fs_info->async_delalloc_pages)) {
10129                 wait_event(fs_info->async_submit_wait,
10130                    (atomic_read(&fs_info->nr_async_submits) == 0 &&
10131                     atomic_read(&fs_info->async_delalloc_pages) == 0));
10132         }
10133         atomic_dec(&fs_info->async_submit_draining);
10134 out:
10135         if (!list_empty_careful(&splice)) {
10136                 spin_lock(&fs_info->delalloc_root_lock);
10137                 list_splice_tail(&splice, &fs_info->delalloc_roots);
10138                 spin_unlock(&fs_info->delalloc_root_lock);
10139         }
10140         mutex_unlock(&fs_info->delalloc_root_mutex);
10141         return ret;
10142 }
10143
10144 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
10145                          const char *symname)
10146 {
10147         struct btrfs_trans_handle *trans;
10148         struct btrfs_root *root = BTRFS_I(dir)->root;
10149         struct btrfs_path *path;
10150         struct btrfs_key key;
10151         struct inode *inode = NULL;
10152         int err;
10153         int drop_inode = 0;
10154         u64 objectid;
10155         u64 index = 0;
10156         int name_len;
10157         int datasize;
10158         unsigned long ptr;
10159         struct btrfs_file_extent_item *ei;
10160         struct extent_buffer *leaf;
10161
10162         name_len = strlen(symname);
10163         if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
10164                 return -ENAMETOOLONG;
10165
10166         /*
10167          * 2 items for inode item and ref
10168          * 2 items for dir items
10169          * 1 item for updating parent inode item
10170          * 1 item for the inline extent item
10171          * 1 item for xattr if selinux is on
10172          */
10173         trans = btrfs_start_transaction(root, 7);
10174         if (IS_ERR(trans))
10175                 return PTR_ERR(trans);
10176
10177         err = btrfs_find_free_ino(root, &objectid);
10178         if (err)
10179                 goto out_unlock;
10180
10181         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
10182                                 dentry->d_name.len, btrfs_ino(dir), objectid,
10183                                 S_IFLNK|S_IRWXUGO, &index);
10184         if (IS_ERR(inode)) {
10185                 err = PTR_ERR(inode);
10186                 goto out_unlock;
10187         }
10188
10189         /*
10190          * If the active LSM wants to access the inode during
10191          * d_instantiate it needs these. Smack checks to see
10192          * if the filesystem supports xattrs by looking at the
10193          * ops vector.
10194          */
10195         inode->i_fop = &btrfs_file_operations;
10196         inode->i_op = &btrfs_file_inode_operations;
10197         inode->i_mapping->a_ops = &btrfs_aops;
10198         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
10199
10200         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
10201         if (err)
10202                 goto out_unlock_inode;
10203
10204         path = btrfs_alloc_path();
10205         if (!path) {
10206                 err = -ENOMEM;
10207                 goto out_unlock_inode;
10208         }
10209         key.objectid = btrfs_ino(inode);
10210         key.offset = 0;
10211         key.type = BTRFS_EXTENT_DATA_KEY;
10212         datasize = btrfs_file_extent_calc_inline_size(name_len);
10213         err = btrfs_insert_empty_item(trans, root, path, &key,
10214                                       datasize);
10215         if (err) {
10216                 btrfs_free_path(path);
10217                 goto out_unlock_inode;
10218         }
10219         leaf = path->nodes[0];
10220         ei = btrfs_item_ptr(leaf, path->slots[0],
10221                             struct btrfs_file_extent_item);
10222         btrfs_set_file_extent_generation(leaf, ei, trans->transid);
10223         btrfs_set_file_extent_type(leaf, ei,
10224                                    BTRFS_FILE_EXTENT_INLINE);
10225         btrfs_set_file_extent_encryption(leaf, ei, 0);
10226         btrfs_set_file_extent_compression(leaf, ei, 0);
10227         btrfs_set_file_extent_other_encoding(leaf, ei, 0);
10228         btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
10229
10230         ptr = btrfs_file_extent_inline_start(ei);
10231         write_extent_buffer(leaf, symname, ptr, name_len);
10232         btrfs_mark_buffer_dirty(leaf);
10233         btrfs_free_path(path);
10234
10235         inode->i_op = &btrfs_symlink_inode_operations;
10236         inode_nohighmem(inode);
10237         inode->i_mapping->a_ops = &btrfs_symlink_aops;
10238         inode_set_bytes(inode, name_len);
10239         btrfs_i_size_write(inode, name_len);
10240         err = btrfs_update_inode(trans, root, inode);
10241         /*
10242          * Last step, add directory indexes for our symlink inode. This is the
10243          * last step to avoid extra cleanup of these indexes if an error happens
10244          * elsewhere above.
10245          */
10246         if (!err)
10247                 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
10248         if (err) {
10249                 drop_inode = 1;
10250                 goto out_unlock_inode;
10251         }
10252
10253         unlock_new_inode(inode);
10254         d_instantiate(dentry, inode);
10255
10256 out_unlock:
10257         btrfs_end_transaction(trans, root);
10258         if (drop_inode) {
10259                 inode_dec_link_count(inode);
10260                 iput(inode);
10261         }
10262         btrfs_btree_balance_dirty(root);
10263         return err;
10264
10265 out_unlock_inode:
10266         drop_inode = 1;
10267         unlock_new_inode(inode);
10268         goto out_unlock;
10269 }
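/*
 * Layout note: the symlink target is not stored in a separate data
 * block but as an inline file extent item in the fs tree, keyed by
 * (ino, BTRFS_EXTENT_DATA_KEY, 0), which is what the path insertion
 * above builds.  That is why the target length is capped at
 * BTRFS_MAX_INLINE_DATA_SIZE(root) rather than PATH_MAX, and why
 * readers materialize it through the regular readpage path
 * (btrfs_symlink_aops).
 */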
10270
10271 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
10272                                        u64 start, u64 num_bytes, u64 min_size,
10273                                        loff_t actual_len, u64 *alloc_hint,
10274                                        struct btrfs_trans_handle *trans)
10275 {
10276         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
10277         struct extent_map *em;
10278         struct btrfs_root *root = BTRFS_I(inode)->root;
10279         struct btrfs_key ins;
10280         u64 cur_offset = start;
10281         u64 i_size;
10282         u64 cur_bytes;
10283         u64 last_alloc = (u64)-1;
10284         int ret = 0;
10285         bool own_trans = true;
10286
10287         if (trans)
10288                 own_trans = false;
10289         while (num_bytes > 0) {
10290                 if (own_trans) {
10291                         trans = btrfs_start_transaction(root, 3);
10292                         if (IS_ERR(trans)) {
10293                                 ret = PTR_ERR(trans);
10294                                 break;
10295                         }
10296                 }
10297
10298                 cur_bytes = min_t(u64, num_bytes, SZ_256M);
10299                 cur_bytes = max(cur_bytes, min_size);
10300                 /*
10301                  * If we are severely fragmented we could end up with really
10302                  * small allocations, so if the allocator is returning small
10303                  * chunks lets make its job easier by only searching for those
10304                  * chunks, let's make its job easier by only searching for
10305                  * chunks of that size.
10306                 cur_bytes = min(cur_bytes, last_alloc);
10307                 ret = btrfs_reserve_extent(root, cur_bytes, min_size, 0,
10308                                            *alloc_hint, &ins, 1, 0);
10309                 if (ret) {
10310                         if (own_trans)
10311                                 btrfs_end_transaction(trans, root);
10312                         break;
10313                 }
10314                 btrfs_dec_block_group_reservations(root->fs_info, ins.objectid);
10315
10316                 last_alloc = ins.offset;
10317                 ret = insert_reserved_file_extent(trans, inode,
10318                                                   cur_offset, ins.objectid,
10319                                                   ins.offset, ins.offset,
10320                                                   ins.offset, 0, 0, 0,
10321                                                   BTRFS_FILE_EXTENT_PREALLOC);
10322                 if (ret) {
10323                         btrfs_free_reserved_extent(root, ins.objectid,
10324                                                    ins.offset, 0);
10325                         btrfs_abort_transaction(trans, root, ret);
10326                         if (own_trans)
10327                                 btrfs_end_transaction(trans, root);
10328                         break;
10329                 }
10330
10331                 btrfs_drop_extent_cache(inode, cur_offset,
10332                                         cur_offset + ins.offset - 1, 0);
10333
10334                 em = alloc_extent_map();
10335                 if (!em) {
10336                         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
10337                                 &BTRFS_I(inode)->runtime_flags);
10338                         goto next;
10339                 }
10340
10341                 em->start = cur_offset;
10342                 em->orig_start = cur_offset;
10343                 em->len = ins.offset;
10344                 em->block_start = ins.objectid;
10345                 em->block_len = ins.offset;
10346                 em->orig_block_len = ins.offset;
10347                 em->ram_bytes = ins.offset;
10348                 em->bdev = root->fs_info->fs_devices->latest_bdev;
10349                 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
10350                 em->generation = trans->transid;
10351
10352                 while (1) {
10353                         write_lock(&em_tree->lock);
10354                         ret = add_extent_mapping(em_tree, em, 1);
10355                         write_unlock(&em_tree->lock);
10356                         if (ret != -EEXIST)
10357                                 break;
10358                         btrfs_drop_extent_cache(inode, cur_offset,
10359                                                 cur_offset + ins.offset - 1,
10360                                                 0);
10361                 }
10362                 free_extent_map(em);
10363 next:
10364                 num_bytes -= ins.offset;
10365                 cur_offset += ins.offset;
10366                 *alloc_hint = ins.objectid + ins.offset;
10367
10368                 inode_inc_iversion(inode);
10369                 inode->i_ctime = current_fs_time(inode->i_sb);
10370                 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
10371                 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
10372                     (actual_len > inode->i_size) &&
10373                     (cur_offset > inode->i_size)) {
10374                         if (cur_offset > actual_len)
10375                                 i_size = actual_len;
10376                         else
10377                                 i_size = cur_offset;
10378                         i_size_write(inode, i_size);
10379                         btrfs_ordered_update_i_size(inode, i_size, NULL);
10380                 }
10381
10382                 ret = btrfs_update_inode(trans, root, inode);
10383
10384                 if (ret) {
10385                         btrfs_abort_transaction(trans, root, ret);
10386                         if (own_trans)
10387                                 btrfs_end_transaction(trans, root);
10388                         break;
10389                 }
10390
10391                 if (own_trans)
10392                         btrfs_end_transaction(trans, root);
10393         }
10394         return ret;
10395 }
10396
10397 int btrfs_prealloc_file_range(struct inode *inode, int mode,
10398                               u64 start, u64 num_bytes, u64 min_size,
10399                               loff_t actual_len, u64 *alloc_hint)
10400 {
10401         return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
10402                                            min_size, actual_len, alloc_hint,
10403                                            NULL);
10404 }
10405
10406 int btrfs_prealloc_file_range_trans(struct inode *inode,
10407                                     struct btrfs_trans_handle *trans, int mode,
10408                                     u64 start, u64 num_bytes, u64 min_size,
10409                                     loff_t actual_len, u64 *alloc_hint)
10410 {
10411         return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
10412                                            min_size, actual_len, alloc_hint, trans);
10413 }
10414
10415 static int btrfs_set_page_dirty(struct page *page)
10416 {
10417         return __set_page_dirty_nobuffers(page);
10418 }
10419
10420 static int btrfs_permission(struct inode *inode, int mask)
10421 {
10422         struct btrfs_root *root = BTRFS_I(inode)->root;
10423         umode_t mode = inode->i_mode;
10424
10425         if (mask & MAY_WRITE &&
10426             (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
10427                 if (btrfs_root_readonly(root))
10428                         return -EROFS;
10429                 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
10430                         return -EACCES;
10431         }
10432         return generic_permission(inode, mask);
10433 }
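/*
 * Two btrfs-specific denials are layered in front of
 * generic_permission() above: writes anywhere in a subvolume that was
 * made read-only (for example with "btrfs property set <subvol> ro
 * true") fail with -EROFS, and writes to an inode carrying
 * BTRFS_INODE_READONLY fail with -EACCES, before any capability checks
 * are consulted.
 */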
10434
10435 static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
10436 {
10437         struct btrfs_trans_handle *trans;
10438         struct btrfs_root *root = BTRFS_I(dir)->root;
10439         struct inode *inode = NULL;
10440         u64 objectid;
10441         u64 index;
10442         int ret = 0;
10443
10444         /*
10445          * 5 units required for adding orphan entry
10446          */
10447         trans = btrfs_start_transaction(root, 5);
10448         if (IS_ERR(trans))
10449                 return PTR_ERR(trans);
10450
10451         ret = btrfs_find_free_ino(root, &objectid);
10452         if (ret)
10453                 goto out;
10454
10455         inode = btrfs_new_inode(trans, root, dir, NULL, 0,
10456                                 btrfs_ino(dir), objectid, mode, &index);
10457         if (IS_ERR(inode)) {
10458                 ret = PTR_ERR(inode);
10459                 inode = NULL;
10460                 goto out;
10461         }
10462
10463         inode->i_fop = &btrfs_file_operations;
10464         inode->i_op = &btrfs_file_inode_operations;
10465
10466         inode->i_mapping->a_ops = &btrfs_aops;
10467         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
10468
10469         ret = btrfs_init_inode_security(trans, inode, dir, NULL);
10470         if (ret)
10471                 goto out_inode;
10472
10473         ret = btrfs_update_inode(trans, root, inode);
10474         if (ret)
10475                 goto out_inode;
10476         ret = btrfs_orphan_add(trans, inode);
10477         if (ret)
10478                 goto out_inode;
10479
10480         /*
10481          * We set number of links to 0 in btrfs_new_inode(), and here we set
10482          * it to 1 because d_tmpfile() will issue a warning if the count is 0,
10483          * through:
10484          *
10485          *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
10486          */
10487         set_nlink(inode, 1);
10488         unlock_new_inode(inode);
10489         d_tmpfile(dentry, inode);
10490         mark_inode_dirty(inode);
10491
10492 out:
10493         btrfs_end_transaction(trans, root);
10494         if (ret)
10495                 iput(inode);
10496         btrfs_balance_delayed_items(root);
10497         btrfs_btree_balance_dirty(root);
10498         return ret;
10499
10500 out_inode:
10501         unlock_new_inode(inode);
10502         goto out;
10503
10504 }
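/*
 * ->tmpfile backs O_TMPFILE opens: the inode starts out unlinked and is
 * kept alive across a crash only by the orphan item added above.
 * Userspace sketch:
 *
 *	int fd = open("/mnt/btrfs", O_TMPFILE | O_RDWR, 0600);
 *	write(fd, buf, len);	(fully usable, but has no name)
 *
 * The file can later be given a name with linkat() on /proc/self/fd/N
 * unless O_EXCL was also passed; if the fd is simply closed, eviction
 * plus the orphan item guarantee the space is reclaimed.
 */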
10505
10506 /* Inspired by filemap_check_errors() */
10507 int btrfs_inode_check_errors(struct inode *inode)
10508 {
10509         int ret = 0;
10510
10511         if (test_bit(AS_ENOSPC, &inode->i_mapping->flags) &&
10512             test_and_clear_bit(AS_ENOSPC, &inode->i_mapping->flags))
10513                 ret = -ENOSPC;
10514         if (test_bit(AS_EIO, &inode->i_mapping->flags) &&
10515             test_and_clear_bit(AS_EIO, &inode->i_mapping->flags))
10516                 ret = -EIO;
10517
10518         return ret;
10519 }
10520
10521 static const struct inode_operations btrfs_dir_inode_operations = {
10522         .getattr        = btrfs_getattr,
10523         .lookup         = btrfs_lookup,
10524         .create         = btrfs_create,
10525         .unlink         = btrfs_unlink,
10526         .link           = btrfs_link,
10527         .mkdir          = btrfs_mkdir,
10528         .rmdir          = btrfs_rmdir,
10529         .rename2        = btrfs_rename2,
10530         .symlink        = btrfs_symlink,
10531         .setattr        = btrfs_setattr,
10532         .mknod          = btrfs_mknod,
10533         .setxattr       = generic_setxattr,
10534         .getxattr       = generic_getxattr,
10535         .listxattr      = btrfs_listxattr,
10536         .removexattr    = generic_removexattr,
10537         .permission     = btrfs_permission,
10538         .get_acl        = btrfs_get_acl,
10539         .set_acl        = btrfs_set_acl,
10540         .update_time    = btrfs_update_time,
10541         .tmpfile        = btrfs_tmpfile,
10542 };
10543 static const struct inode_operations btrfs_dir_ro_inode_operations = {
10544         .lookup         = btrfs_lookup,
10545         .permission     = btrfs_permission,
10546         .get_acl        = btrfs_get_acl,
10547         .set_acl        = btrfs_set_acl,
10548         .update_time    = btrfs_update_time,
10549 };
10550
10551 static const struct file_operations btrfs_dir_file_operations = {
10552         .llseek         = generic_file_llseek,
10553         .read           = generic_read_dir,
10554         .iterate_shared = btrfs_real_readdir,
10555         .unlocked_ioctl = btrfs_ioctl,
10556 #ifdef CONFIG_COMPAT
10557         .compat_ioctl   = btrfs_compat_ioctl,
10558 #endif
10559         .release        = btrfs_release_file,
10560         .fsync          = btrfs_sync_file,
10561 };
10562
10563 static const struct extent_io_ops btrfs_extent_io_ops = {
10564         .fill_delalloc = run_delalloc_range,
10565         .submit_bio_hook = btrfs_submit_bio_hook,
10566         .merge_bio_hook = btrfs_merge_bio_hook,
10567         .readpage_end_io_hook = btrfs_readpage_end_io_hook,
10568         .writepage_end_io_hook = btrfs_writepage_end_io_hook,
10569         .writepage_start_hook = btrfs_writepage_start_hook,
10570         .set_bit_hook = btrfs_set_bit_hook,
10571         .clear_bit_hook = btrfs_clear_bit_hook,
10572         .merge_extent_hook = btrfs_merge_extent_hook,
10573         .split_extent_hook = btrfs_split_extent_hook,
10574 };
10575
10576 /*
10577  * btrfs doesn't support the bmap operation because swapfiles
10578  * use bmap to make a mapping of extents in the file.  They assume
10579  * these extents won't change over the life of the file and they
10580  * use the bmap result to do IO directly to the drive.
10581  *
10582  * The btrfs bmap call would return logical addresses that aren't
10583  * suitable for IO and will also change frequently as COW
10584  * operations happen.  So, swapfile + btrfs == corruption.
10585  *
10586  * For now we're avoiding this by dropping bmap.
10587  */
10588 static const struct address_space_operations btrfs_aops = {
10589         .readpage       = btrfs_readpage,
10590         .writepage      = btrfs_writepage,
10591         .writepages     = btrfs_writepages,
10592         .readpages      = btrfs_readpages,
10593         .direct_IO      = btrfs_direct_IO,
10594         .invalidatepage = btrfs_invalidatepage,
10595         .releasepage    = btrfs_releasepage,
10596         .set_page_dirty = btrfs_set_page_dirty,
10597         .error_remove_page = generic_error_remove_page,
10598 };
10599
10600 static const struct address_space_operations btrfs_symlink_aops = {
10601         .readpage       = btrfs_readpage,
10602         .writepage      = btrfs_writepage,
10603         .invalidatepage = btrfs_invalidatepage,
10604         .releasepage    = btrfs_releasepage,
10605 };
10606
10607 static const struct inode_operations btrfs_file_inode_operations = {
10608         .getattr        = btrfs_getattr,
10609         .setattr        = btrfs_setattr,
10610         .setxattr       = generic_setxattr,
10611         .getxattr       = generic_getxattr,
10612         .listxattr      = btrfs_listxattr,
10613         .removexattr    = generic_removexattr,
10614         .permission     = btrfs_permission,
10615         .fiemap         = btrfs_fiemap,
10616         .get_acl        = btrfs_get_acl,
10617         .set_acl        = btrfs_set_acl,
10618         .update_time    = btrfs_update_time,
10619 };
10620 static const struct inode_operations btrfs_special_inode_operations = {
10621         .getattr        = btrfs_getattr,
10622         .setattr        = btrfs_setattr,
10623         .permission     = btrfs_permission,
10624         .setxattr       = generic_setxattr,
10625         .getxattr       = generic_getxattr,
10626         .listxattr      = btrfs_listxattr,
10627         .removexattr    = generic_removexattr,
10628         .get_acl        = btrfs_get_acl,
10629         .set_acl        = btrfs_set_acl,
10630         .update_time    = btrfs_update_time,
10631 };
10632 static const struct inode_operations btrfs_symlink_inode_operations = {
10633         .readlink       = generic_readlink,
10634         .get_link       = page_get_link,
10635         .getattr        = btrfs_getattr,
10636         .setattr        = btrfs_setattr,
10637         .permission     = btrfs_permission,
10638         .setxattr       = generic_setxattr,
10639         .getxattr       = generic_getxattr,
10640         .listxattr      = btrfs_listxattr,
10641         .removexattr    = generic_removexattr,
10642         .update_time    = btrfs_update_time,
10643 };
10644
10645 const struct dentry_operations btrfs_dentry_operations = {
10646         .d_delete       = btrfs_dentry_delete,
10647         .d_release      = btrfs_dentry_release,
10648 };