Btrfs: fix number of transaction units required to create symlink
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
#include "tree-log.h"
#include "volumes.h"
#include "compression.h"
#include "locking.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "backref.h"
#include "hash.h"
#include "props.h"
#include "qgroup.h"

struct btrfs_iget_args {
        struct btrfs_key *location;
        struct btrfs_root *root;
};

struct btrfs_dio_data {
        u64 outstanding_extents;
        u64 reserve;
        u64 unsubmitted_oe_range_start;
        u64 unsubmitted_oe_range_end;
};

static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
static struct kmem_cache *btrfs_delalloc_work_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;

#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
        [S_IFREG >> S_SHIFT]    = BTRFS_FT_REG_FILE,
        [S_IFDIR >> S_SHIFT]    = BTRFS_FT_DIR,
        [S_IFCHR >> S_SHIFT]    = BTRFS_FT_CHRDEV,
        [S_IFBLK >> S_SHIFT]    = BTRFS_FT_BLKDEV,
        [S_IFIFO >> S_SHIFT]    = BTRFS_FT_FIFO,
        [S_IFSOCK >> S_SHIFT]   = BTRFS_FT_SOCK,
        [S_IFLNK >> S_SHIFT]    = BTRFS_FT_SYMLINK,
};
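
/*
 * Illustrative sketch, not part of the original file: how the table above
 * is meant to be indexed.  The helper name example_mode_to_ft is
 * hypothetical and added only for illustration.
 */
static inline u8 __maybe_unused example_mode_to_ft(umode_t mode)
{
        /* mask off everything but the file-type bits, then shift down */
        return btrfs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
}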

static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
static noinline int cow_file_range(struct inode *inode,
                                   struct page *locked_page,
                                   u64 start, u64 end, int *page_started,
                                   unsigned long *nr_written, int unlock);
static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
                                           u64 len, u64 orig_start,
                                           u64 block_start, u64 block_len,
                                           u64 orig_block_len, u64 ram_bytes,
                                           int type);

static int btrfs_dirty_inode(struct inode *inode);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_inode_set_ops(struct inode *inode)
{
        BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
}
#endif

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
                                     struct inode *inode,  struct inode *dir,
                                     const struct qstr *qstr)
{
        int err;

        err = btrfs_init_acl(trans, inode, dir);
        if (!err)
                err = btrfs_xattr_security_init(trans, inode, dir, qstr);
        return err;
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_path *path, int extent_inserted,
                                struct btrfs_root *root, struct inode *inode,
                                u64 start, size_t size, size_t compressed_size,
                                int compress_type,
                                struct page **compressed_pages)
{
        struct extent_buffer *leaf;
        struct page *page = NULL;
        char *kaddr;
        unsigned long ptr;
        struct btrfs_file_extent_item *ei;
        int err = 0;
        int ret;
        size_t cur_size = size;
        unsigned long offset;

        if (compressed_size && compressed_pages)
                cur_size = compressed_size;

        inode_add_bytes(inode, size);

        if (!extent_inserted) {
                struct btrfs_key key;
                size_t datasize;

                key.objectid = btrfs_ino(inode);
                key.offset = start;
                key.type = BTRFS_EXTENT_DATA_KEY;

                datasize = btrfs_file_extent_calc_inline_size(cur_size);
                path->leave_spinning = 1;
                ret = btrfs_insert_empty_item(trans, root, path, &key,
                                              datasize);
                if (ret) {
                        err = ret;
                        goto fail;
                }
        }
        leaf = path->nodes[0];
        ei = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        btrfs_set_file_extent_generation(leaf, ei, trans->transid);
        btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
        btrfs_set_file_extent_encryption(leaf, ei, 0);
        btrfs_set_file_extent_other_encoding(leaf, ei, 0);
        btrfs_set_file_extent_ram_bytes(leaf, ei, size);
        ptr = btrfs_file_extent_inline_start(ei);

        if (compress_type != BTRFS_COMPRESS_NONE) {
                struct page *cpage;
                int i = 0;
                while (compressed_size > 0) {
                        cpage = compressed_pages[i];
                        cur_size = min_t(unsigned long, compressed_size,
                                       PAGE_CACHE_SIZE);

                        kaddr = kmap_atomic(cpage);
                        write_extent_buffer(leaf, kaddr, ptr, cur_size);
                        kunmap_atomic(kaddr);

                        i++;
                        ptr += cur_size;
                        compressed_size -= cur_size;
                }
                btrfs_set_file_extent_compression(leaf, ei,
                                                  compress_type);
        } else {
                page = find_get_page(inode->i_mapping,
                                     start >> PAGE_CACHE_SHIFT);
                btrfs_set_file_extent_compression(leaf, ei, 0);
                kaddr = kmap_atomic(page);
                offset = start & (PAGE_CACHE_SIZE - 1);
                write_extent_buffer(leaf, kaddr + offset, ptr, size);
                kunmap_atomic(kaddr);
                page_cache_release(page);
        }
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);

        /*
         * we're an inline extent, so nobody can
         * extend the file past i_size without locking
         * a page we already have locked.
         *
         * We must do any isize and inode updates
         * before we unlock the pages.  Otherwise we
         * could end up racing with unlink.
         */
        BTRFS_I(inode)->disk_i_size = inode->i_size;
        ret = btrfs_update_inode(trans, root, inode);

        return ret;
fail:
        return err;
}
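
/*
 * Illustrative sketch, not part of the original file: the item key that
 * insert_inline_extent() builds when the caller has not already inserted
 * an empty item.  The helper name is hypothetical.
 */
static inline void __maybe_unused example_inline_extent_key(struct inode *inode,
                                                            u64 start,
                                                            struct btrfs_key *key)
{
        key->objectid = btrfs_ino(inode);       /* the inode number */
        key->offset = start;                    /* file offset of the extent */
        key->type = BTRFS_EXTENT_DATA_KEY;
}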


/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_root *root,
                                          struct inode *inode, u64 start,
                                          u64 end, size_t compressed_size,
                                          int compress_type,
                                          struct page **compressed_pages)
{
        struct btrfs_trans_handle *trans;
        u64 isize = i_size_read(inode);
        u64 actual_end = min(end + 1, isize);
        u64 inline_len = actual_end - start;
        u64 aligned_end = ALIGN(end, root->sectorsize);
        u64 data_len = inline_len;
        int ret;
        struct btrfs_path *path;
        int extent_inserted = 0;
        u32 extent_item_size;

        if (compressed_size)
                data_len = compressed_size;

        if (start > 0 ||
            actual_end > PAGE_CACHE_SIZE ||
            data_len > BTRFS_MAX_INLINE_DATA_SIZE(root) ||
            (!compressed_size &&
            (actual_end & (root->sectorsize - 1)) == 0) ||
            end + 1 < isize ||
            data_len > root->fs_info->max_inline) {
                return 1;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        trans = btrfs_join_transaction(root);
        if (IS_ERR(trans)) {
                btrfs_free_path(path);
                return PTR_ERR(trans);
        }
        trans->block_rsv = &root->fs_info->delalloc_block_rsv;

        if (compressed_size && compressed_pages)
                extent_item_size = btrfs_file_extent_calc_inline_size(
                   compressed_size);
        else
                extent_item_size = btrfs_file_extent_calc_inline_size(
                    inline_len);

        ret = __btrfs_drop_extents(trans, root, inode, path,
                                   start, aligned_end, NULL,
                                   1, 1, extent_item_size, &extent_inserted);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto out;
        }

        if (isize > actual_end)
                inline_len = min_t(u64, isize, actual_end);
        ret = insert_inline_extent(trans, path, extent_inserted,
                                   root, inode, start,
                                   inline_len, compressed_size,
                                   compress_type, compressed_pages);
        if (ret && ret != -ENOSPC) {
                btrfs_abort_transaction(trans, root, ret);
                goto out;
        } else if (ret == -ENOSPC) {
                ret = 1;
                goto out;
        }

        set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
        btrfs_delalloc_release_metadata(inode, end + 1 - start);
        btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
out:
        /*
         * Don't forget to free the reserved space; an inlined extent
         * won't be counted as a data extent, so free it directly here.
         * At reserve time the space is always aligned to page size,
         * so just free one page here.
         */
        btrfs_qgroup_free_data(inode, 0, PAGE_CACHE_SIZE);
        btrfs_free_path(path);
        btrfs_end_transaction(trans, root);
        return ret;
}
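
/*
 * Illustrative sketch, not part of the original file: the eligibility
 * checks at the top of cow_file_range_inline(), restated as a standalone
 * predicate for clarity.  The helper name is hypothetical.
 */
static inline bool __maybe_unused example_can_inline(struct btrfs_root *root,
                                                     u64 start, u64 end,
                                                     u64 isize, u64 data_len,
                                                     bool compressed)
{
        u64 actual_end = min(end + 1, isize);

        return start == 0 &&                    /* must begin at offset 0 */
               actual_end <= PAGE_CACHE_SIZE && /* must fit in the first page */
               data_len <= BTRFS_MAX_INLINE_DATA_SIZE(root) &&
               /* uncompressed data must not end exactly on a sector boundary */
               (compressed ||
                (actual_end & (root->sectorsize - 1)) != 0) &&
               end + 1 >= isize &&              /* must reach EOF */
               data_len <= root->fs_info->max_inline;
}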

struct async_extent {
        u64 start;
        u64 ram_size;
        u64 compressed_size;
        struct page **pages;
        unsigned long nr_pages;
        int compress_type;
        struct list_head list;
};

struct async_cow {
        struct inode *inode;
        struct btrfs_root *root;
        struct page *locked_page;
        u64 start;
        u64 end;
        struct list_head extents;
        struct btrfs_work work;
};

static noinline int add_async_extent(struct async_cow *cow,
                                     u64 start, u64 ram_size,
                                     u64 compressed_size,
                                     struct page **pages,
                                     unsigned long nr_pages,
                                     int compress_type)
{
        struct async_extent *async_extent;

        async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
        BUG_ON(!async_extent); /* -ENOMEM */
        async_extent->start = start;
        async_extent->ram_size = ram_size;
        async_extent->compressed_size = compressed_size;
        async_extent->pages = pages;
        async_extent->nr_pages = nr_pages;
        async_extent->compress_type = compress_type;
        list_add_tail(&async_extent->list, &cow->extents);
        return 0;
}

static inline int inode_need_compress(struct inode *inode)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;

        /* force compress */
        if (btrfs_test_opt(root, FORCE_COMPRESS))
                return 1;
        /* bad compression ratios */
        if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
                return 0;
        if (btrfs_test_opt(root, COMPRESS) ||
            BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS ||
            BTRFS_I(inode)->force_compress)
                return 1;
        return 0;
}

/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that the flusher thread sent them
 * down.
 */
static noinline void compress_file_range(struct inode *inode,
                                        struct page *locked_page,
                                        u64 start, u64 end,
                                        struct async_cow *async_cow,
                                        int *num_added)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        u64 num_bytes;
        u64 blocksize = root->sectorsize;
        u64 actual_end;
        u64 isize = i_size_read(inode);
        int ret = 0;
        struct page **pages = NULL;
        unsigned long nr_pages;
        unsigned long nr_pages_ret = 0;
        unsigned long total_compressed = 0;
        unsigned long total_in = 0;
        unsigned long max_compressed = 128 * 1024;
        unsigned long max_uncompressed = 128 * 1024;
        int i;
        int will_compress;
        int compress_type = root->fs_info->compress_type;
        int redirty = 0;

        /* if this is a small write inside eof, kick off a defrag */
        if ((end - start + 1) < 16 * 1024 &&
            (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
                btrfs_add_inode_defrag(NULL, inode);

        actual_end = min_t(u64, isize, end + 1);
again:
        will_compress = 0;
        nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
        nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

        /*
         * we don't want to send crud past the end of i_size through
         * compression, that's just a waste of CPU time.  So, if the
         * end of the file is before the start of our current
         * requested range of bytes, we bail out to the uncompressed
         * cleanup code that can deal with all of this.
         *
         * It isn't really the fastest way to fix things, but this is a
         * very uncommon corner.
         */
        if (actual_end <= start)
                goto cleanup_and_bail_uncompressed;

        total_compressed = actual_end - start;

        /*
         * skip compression for a small file range (<= blocksize) that
         * isn't an inline extent, since it doesn't save disk space at all.
         */
        if (total_compressed <= blocksize &&
           (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
                goto cleanup_and_bail_uncompressed;

        /* we want to make sure that amount of ram required to uncompress
         * an extent is reasonable, so we limit the total size in ram
         * of a compressed extent to 128k.  This is a crucial number
         * because it also controls how easily we can spread reads across
         * cpus for decompression.
         *
         * We also want to make sure the amount of IO required to do
         * a random read is reasonably small, so we limit the size of
         * a compressed extent to 128k.
         */
        total_compressed = min(total_compressed, max_uncompressed);
        num_bytes = ALIGN(end - start + 1, blocksize);
        num_bytes = max(blocksize,  num_bytes);
        total_in = 0;
        ret = 0;

        /*
         * we do compression for mount -o compress and when the
         * inode has not been flagged as nocompress.  This flag can
         * change at any time if we discover bad compression ratios.
         */
        if (inode_need_compress(inode)) {
                WARN_ON(pages);
                pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
                if (!pages) {
                        /* just bail out to the uncompressed code */
                        goto cont;
                }

                if (BTRFS_I(inode)->force_compress)
                        compress_type = BTRFS_I(inode)->force_compress;

                /*
                 * we need to call clear_page_dirty_for_io on each
                 * page in the range.  Otherwise applications with the file
                 * mmap'd can wander in and change the page contents while
                 * we are compressing them.
                 *
                 * If the compression fails for any reason, we set the pages
                 * dirty again later on.
                 */
                extent_range_clear_dirty_for_io(inode, start, end);
                redirty = 1;
                ret = btrfs_compress_pages(compress_type,
                                           inode->i_mapping, start,
                                           total_compressed, pages,
                                           nr_pages, &nr_pages_ret,
                                           &total_in,
                                           &total_compressed,
                                           max_compressed);

                if (!ret) {
                        unsigned long offset = total_compressed &
                                (PAGE_CACHE_SIZE - 1);
                        struct page *page = pages[nr_pages_ret - 1];
                        char *kaddr;

                        /* zero the tail end of the last page, we might be
                         * sending it down to disk
                         */
                        if (offset) {
                                kaddr = kmap_atomic(page);
                                memset(kaddr + offset, 0,
                                       PAGE_CACHE_SIZE - offset);
                                kunmap_atomic(kaddr);
                        }
                        will_compress = 1;
                }
        }
cont:
        if (start == 0) {
                /* lets try to make an inline extent */
                if (ret || total_in < (actual_end - start)) {
                        /* we didn't compress the entire range, try
                         * to make an uncompressed inline extent.
                         */
                        ret = cow_file_range_inline(root, inode, start, end,
                                                    0, 0, NULL);
                } else {
                        /* try making a compressed inline extent */
                        ret = cow_file_range_inline(root, inode, start, end,
                                                    total_compressed,
                                                    compress_type, pages);
                }
                if (ret <= 0) {
                        unsigned long clear_flags = EXTENT_DELALLOC |
                                EXTENT_DEFRAG;
                        unsigned long page_error_op;

                        clear_flags |= (ret < 0) ? EXTENT_DO_ACCOUNTING : 0;
                        page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;

                        /*
                         * inline extent creation worked or returned error,
                         * we don't need to create any more async work items.
                         * Unlock and free up our temp pages.
                         */
                        extent_clear_unlock_delalloc(inode, start, end, NULL,
                                                     clear_flags, PAGE_UNLOCK |
                                                     PAGE_CLEAR_DIRTY |
                                                     PAGE_SET_WRITEBACK |
                                                     page_error_op |
                                                     PAGE_END_WRITEBACK);
                        goto free_pages_out;
                }
        }

        if (will_compress) {
                /*
                 * we aren't doing an inline extent, so round the compressed
                 * size up to a block size boundary so the allocator does
                 * sane things
                 */
                total_compressed = ALIGN(total_compressed, blocksize);

                /*
                 * one last check to make sure the compression is really a
                 * win, compare the page count read with the blocks on disk
                 */
                total_in = ALIGN(total_in, PAGE_CACHE_SIZE);
                if (total_compressed >= total_in) {
                        will_compress = 0;
                } else {
                        num_bytes = total_in;
                }
        }
        if (!will_compress && pages) {
                /*
                 * the compression code ran but failed to make things smaller,
                 * free any pages it allocated and our page pointer array
                 */
                for (i = 0; i < nr_pages_ret; i++) {
                        WARN_ON(pages[i]->mapping);
                        page_cache_release(pages[i]);
                }
                kfree(pages);
                pages = NULL;
                total_compressed = 0;
                nr_pages_ret = 0;

                /* flag the file so we don't compress in the future */
                if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
                    !(BTRFS_I(inode)->force_compress)) {
                        BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
                }
        }
        if (will_compress) {
                *num_added += 1;

                /* the async work queues will take care of doing actual
                 * allocation on disk for these compressed pages,
                 * and will submit them to the elevator.
                 */
                add_async_extent(async_cow, start, num_bytes,
                                 total_compressed, pages, nr_pages_ret,
                                 compress_type);

                if (start + num_bytes < end) {
                        start += num_bytes;
                        pages = NULL;
                        cond_resched();
                        goto again;
                }
        } else {
cleanup_and_bail_uncompressed:
                /*
                 * No compression, but we still need to write the pages in
                 * the file we've been given so far.  redirty the locked
                 * page if it corresponds to our extent and set things up
                 * for the async work queue to run cow_file_range to do
                 * the normal delalloc dance
                 */
                if (page_offset(locked_page) >= start &&
                    page_offset(locked_page) <= end) {
                        __set_page_dirty_nobuffers(locked_page);
                        /* unlocked later on in the async handlers */
                }
                if (redirty)
                        extent_range_redirty_for_io(inode, start, end);
                add_async_extent(async_cow, start, end - start + 1,
                                 0, NULL, 0, BTRFS_COMPRESS_NONE);
                *num_added += 1;
        }

        return;

free_pages_out:
        for (i = 0; i < nr_pages_ret; i++) {
                WARN_ON(pages[i]->mapping);
                page_cache_release(pages[i]);
        }
        kfree(pages);
}
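
/*
 * Illustrative sketch, not part of the original file: the per-iteration
 * window used by compress_file_range() above.  One async extent holds at
 * most 128K, so a larger dirty range is consumed in 128K steps via the
 * "again" label.  The helper name is hypothetical.
 */
static inline unsigned long __maybe_unused example_compress_window(u64 start,
                                                                   u64 end)
{
        unsigned long nr_pages = (end >> PAGE_CACHE_SHIFT) -
                                 (start >> PAGE_CACHE_SHIFT) + 1;

        /* cap at 128K worth of pages, matching max_uncompressed above */
        return min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
}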

static void free_async_extent_pages(struct async_extent *async_extent)
{
        int i;

        if (!async_extent->pages)
                return;

        for (i = 0; i < async_extent->nr_pages; i++) {
                WARN_ON(async_extent->pages[i]->mapping);
                page_cache_release(async_extent->pages[i]);
        }
        kfree(async_extent->pages);
        async_extent->nr_pages = 0;
        async_extent->pages = NULL;
}

/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline void submit_compressed_extents(struct inode *inode,
                                              struct async_cow *async_cow)
{
        struct async_extent *async_extent;
        u64 alloc_hint = 0;
        struct btrfs_key ins;
        struct extent_map *em;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_io_tree *io_tree;
        int ret = 0;

again:
        while (!list_empty(&async_cow->extents)) {
                async_extent = list_entry(async_cow->extents.next,
                                          struct async_extent, list);
                list_del(&async_extent->list);

                io_tree = &BTRFS_I(inode)->io_tree;

retry:
                /* did the compression code fall back to uncompressed IO? */
                if (!async_extent->pages) {
                        int page_started = 0;
                        unsigned long nr_written = 0;

                        lock_extent(io_tree, async_extent->start,
                                         async_extent->start +
                                         async_extent->ram_size - 1);

                        /* allocate blocks */
                        ret = cow_file_range(inode, async_cow->locked_page,
                                             async_extent->start,
                                             async_extent->start +
                                             async_extent->ram_size - 1,
                                             &page_started, &nr_written, 0);

                        /* JDM XXX */

                        /*
                         * if page_started, cow_file_range inserted an
                         * inline extent and took care of all the unlocking
                         * and IO for us.  Otherwise, we need to submit
                         * all those pages down to the drive.
                         */
                        if (!page_started && !ret)
                                extent_write_locked_range(io_tree,
                                                  inode, async_extent->start,
                                                  async_extent->start +
                                                  async_extent->ram_size - 1,
                                                  btrfs_get_extent,
                                                  WB_SYNC_ALL);
                        else if (ret)
                                unlock_page(async_cow->locked_page);
                        kfree(async_extent);
                        cond_resched();
                        continue;
                }

                lock_extent(io_tree, async_extent->start,
                            async_extent->start + async_extent->ram_size - 1);

                ret = btrfs_reserve_extent(root,
                                           async_extent->compressed_size,
                                           async_extent->compressed_size,
                                           0, alloc_hint, &ins, 1, 1);
                if (ret) {
                        free_async_extent_pages(async_extent);

                        if (ret == -ENOSPC) {
                                unlock_extent(io_tree, async_extent->start,
                                              async_extent->start +
                                              async_extent->ram_size - 1);

                                /*
                                 * we need to redirty the pages if we decide to
                                 * fallback to uncompressed IO, otherwise we
                                 * will not submit these pages down to lower
                                 * layers.
                                 */
                                extent_range_redirty_for_io(inode,
                                                async_extent->start,
                                                async_extent->start +
                                                async_extent->ram_size - 1);

                                goto retry;
                        }
                        goto out_free;
                }
                /*
                 * here we're doing allocation and writeback of the
                 * compressed pages
                 */
                btrfs_drop_extent_cache(inode, async_extent->start,
                                        async_extent->start +
                                        async_extent->ram_size - 1, 0);

                em = alloc_extent_map();
                if (!em) {
                        ret = -ENOMEM;
                        goto out_free_reserve;
                }
                em->start = async_extent->start;
                em->len = async_extent->ram_size;
                em->orig_start = em->start;
                em->mod_start = em->start;
                em->mod_len = em->len;

                em->block_start = ins.objectid;
                em->block_len = ins.offset;
                em->orig_block_len = ins.offset;
                em->ram_bytes = async_extent->ram_size;
                em->bdev = root->fs_info->fs_devices->latest_bdev;
                em->compress_type = async_extent->compress_type;
                set_bit(EXTENT_FLAG_PINNED, &em->flags);
                set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                em->generation = -1;

                while (1) {
                        write_lock(&em_tree->lock);
                        ret = add_extent_mapping(em_tree, em, 1);
                        write_unlock(&em_tree->lock);
                        if (ret != -EEXIST) {
                                free_extent_map(em);
                                break;
                        }
                        btrfs_drop_extent_cache(inode, async_extent->start,
                                                async_extent->start +
                                                async_extent->ram_size - 1, 0);
                }

                if (ret)
                        goto out_free_reserve;

                ret = btrfs_add_ordered_extent_compress(inode,
                                                async_extent->start,
                                                ins.objectid,
                                                async_extent->ram_size,
                                                ins.offset,
                                                BTRFS_ORDERED_COMPRESSED,
                                                async_extent->compress_type);
                if (ret) {
                        btrfs_drop_extent_cache(inode, async_extent->start,
                                                async_extent->start +
                                                async_extent->ram_size - 1, 0);
                        goto out_free_reserve;
                }

                /*
                 * clear dirty, set writeback and unlock the pages.
                 */
                extent_clear_unlock_delalloc(inode, async_extent->start,
                                async_extent->start +
                                async_extent->ram_size - 1,
                                NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
                                PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
                                PAGE_SET_WRITEBACK);
                ret = btrfs_submit_compressed_write(inode,
                                    async_extent->start,
                                    async_extent->ram_size,
                                    ins.objectid,
                                    ins.offset, async_extent->pages,
                                    async_extent->nr_pages);
                if (ret) {
                        struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
                        struct page *p = async_extent->pages[0];
                        const u64 start = async_extent->start;
                        const u64 end = start + async_extent->ram_size - 1;

                        p->mapping = inode->i_mapping;
                        tree->ops->writepage_end_io_hook(p, start, end,
                                                         NULL, 0);
                        p->mapping = NULL;
                        extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
                                                     PAGE_END_WRITEBACK |
                                                     PAGE_SET_ERROR);
                        free_async_extent_pages(async_extent);
                }
                alloc_hint = ins.objectid + ins.offset;
                kfree(async_extent);
                cond_resched();
        }
        return;
out_free_reserve:
        btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
out_free:
        extent_clear_unlock_delalloc(inode, async_extent->start,
                                     async_extent->start +
                                     async_extent->ram_size - 1,
                                     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
                                     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
                                     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
                                     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
                                     PAGE_SET_ERROR);
        free_async_extent_pages(async_extent);
        kfree(async_extent);
        goto again;
}

static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
                                      u64 num_bytes)
{
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_map *em;
        u64 alloc_hint = 0;

        read_lock(&em_tree->lock);
        em = search_extent_mapping(em_tree, start, num_bytes);
        if (em) {
                /*
                 * if block start isn't an actual block number then find the
                 * first block in this inode and use that as a hint.  If that
                 * block is also bogus then just don't worry about it.
                 */
                if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
                        free_extent_map(em);
                        em = search_extent_mapping(em_tree, 0, 0);
                        if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
                                alloc_hint = em->block_start;
                        if (em)
                                free_extent_map(em);
                } else {
                        alloc_hint = em->block_start;
                        free_extent_map(em);
                }
        }
        read_unlock(&em_tree->lock);

        return alloc_hint;
}

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the callbacks end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
                                   struct page *locked_page,
                                   u64 start, u64 end, int *page_started,
                                   unsigned long *nr_written,
                                   int unlock)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        u64 alloc_hint = 0;
        u64 num_bytes;
        unsigned long ram_size;
        u64 disk_num_bytes;
        u64 cur_alloc_size;
        u64 blocksize = root->sectorsize;
        struct btrfs_key ins;
        struct extent_map *em;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        int ret = 0;

        if (btrfs_is_free_space_inode(inode)) {
                WARN_ON_ONCE(1);
                ret = -EINVAL;
                goto out_unlock;
        }

        num_bytes = ALIGN(end - start + 1, blocksize);
        num_bytes = max(blocksize,  num_bytes);
        disk_num_bytes = num_bytes;

        /* if this is a small write inside eof, kick off defrag */
        if (num_bytes < 64 * 1024 &&
            (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
                btrfs_add_inode_defrag(NULL, inode);

        if (start == 0) {
                /* lets try to make an inline extent */
                ret = cow_file_range_inline(root, inode, start, end, 0, 0,
                                            NULL);
                if (ret == 0) {
                        extent_clear_unlock_delalloc(inode, start, end, NULL,
                                     EXTENT_LOCKED | EXTENT_DELALLOC |
                                     EXTENT_DEFRAG, PAGE_UNLOCK |
                                     PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
                                     PAGE_END_WRITEBACK);

                        *nr_written = *nr_written +
                             (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
                        *page_started = 1;
                        goto out;
                } else if (ret < 0) {
                        goto out_unlock;
                }
        }

        BUG_ON(disk_num_bytes >
               btrfs_super_total_bytes(root->fs_info->super_copy));

        alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
        btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

        while (disk_num_bytes > 0) {
                unsigned long op;

                cur_alloc_size = disk_num_bytes;
                ret = btrfs_reserve_extent(root, cur_alloc_size,
                                           root->sectorsize, 0, alloc_hint,
                                           &ins, 1, 1);
                if (ret < 0)
                        goto out_unlock;

                em = alloc_extent_map();
                if (!em) {
                        ret = -ENOMEM;
                        goto out_reserve;
                }
                em->start = start;
                em->orig_start = em->start;
                ram_size = ins.offset;
                em->len = ins.offset;
                em->mod_start = em->start;
                em->mod_len = em->len;

                em->block_start = ins.objectid;
                em->block_len = ins.offset;
                em->orig_block_len = ins.offset;
                em->ram_bytes = ram_size;
                em->bdev = root->fs_info->fs_devices->latest_bdev;
                set_bit(EXTENT_FLAG_PINNED, &em->flags);
                em->generation = -1;

                while (1) {
                        write_lock(&em_tree->lock);
                        ret = add_extent_mapping(em_tree, em, 1);
                        write_unlock(&em_tree->lock);
                        if (ret != -EEXIST) {
                                free_extent_map(em);
                                break;
                        }
                        btrfs_drop_extent_cache(inode, start,
                                                start + ram_size - 1, 0);
                }
                if (ret)
                        goto out_reserve;

                cur_alloc_size = ins.offset;
                ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
                                               ram_size, cur_alloc_size, 0);
                if (ret)
                        goto out_drop_extent_cache;

                if (root->root_key.objectid ==
                    BTRFS_DATA_RELOC_TREE_OBJECTID) {
                        ret = btrfs_reloc_clone_csums(inode, start,
                                                      cur_alloc_size);
                        if (ret)
                                goto out_drop_extent_cache;
                }

                if (disk_num_bytes < cur_alloc_size)
                        break;

                /* we're not doing compressed IO, don't unlock the first
                 * page (which the caller expects to stay locked), don't
                 * clear any dirty bits and don't set any writeback bits
                 *
                 * Do set the Private2 bit so we know this page was properly
                 * setup for writepage
                 */
                op = unlock ? PAGE_UNLOCK : 0;
                op |= PAGE_SET_PRIVATE2;

                extent_clear_unlock_delalloc(inode, start,
                                             start + ram_size - 1, locked_page,
                                             EXTENT_LOCKED | EXTENT_DELALLOC,
                                             op);
                disk_num_bytes -= cur_alloc_size;
                num_bytes -= cur_alloc_size;
                alloc_hint = ins.objectid + ins.offset;
                start += cur_alloc_size;
        }
out:
        return ret;

out_drop_extent_cache:
        btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0);
out_reserve:
        btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
out_unlock:
        extent_clear_unlock_delalloc(inode, start, end, locked_page,
                                     EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
                                     EXTENT_DELALLOC | EXTENT_DEFRAG,
                                     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
                                     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK);
        goto out;
}
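
/*
 * Illustrative sketch, not part of the original file: the size rounding
 * at the top of cow_file_range().  E.g. with a 4K sectorsize, a 1-byte
 * delalloc range rounds up to one 4K block, and a 200K range stays 200K.
 * The helper name is hypothetical.
 */
static inline u64 __maybe_unused example_cow_range_bytes(struct btrfs_root *root,
                                                         u64 start, u64 end)
{
        u64 blocksize = root->sectorsize;
        u64 num_bytes = ALIGN(end - start + 1, blocksize);

        return max(blocksize, num_bytes);
}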

/*
 * work queue callback to start compression on a file and pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
        struct async_cow *async_cow;
        int num_added = 0;
        async_cow = container_of(work, struct async_cow, work);

        compress_file_range(async_cow->inode, async_cow->locked_page,
                            async_cow->start, async_cow->end, async_cow,
                            &num_added);
        if (num_added == 0) {
                btrfs_add_delayed_iput(async_cow->inode);
                async_cow->inode = NULL;
        }
}

/*
 * work queue callback to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
        struct async_cow *async_cow;
        struct btrfs_root *root;
        unsigned long nr_pages;

        async_cow = container_of(work, struct async_cow, work);

        root = async_cow->root;
        nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
                PAGE_CACHE_SHIFT;

        /*
         * atomic_sub_return implies a barrier for waitqueue_active
         */
        if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
            5 * 1024 * 1024 &&
            waitqueue_active(&root->fs_info->async_submit_wait))
                wake_up(&root->fs_info->async_submit_wait);

        if (async_cow->inode)
                submit_compressed_extents(async_cow->inode, async_cow);
}

static noinline void async_cow_free(struct btrfs_work *work)
{
        struct async_cow *async_cow;
        async_cow = container_of(work, struct async_cow, work);
        if (async_cow->inode)
                btrfs_add_delayed_iput(async_cow->inode);
        kfree(async_cow);
}

static int cow_file_range_async(struct inode *inode, struct page *locked_page,
                                u64 start, u64 end, int *page_started,
                                unsigned long *nr_written)
{
        struct async_cow *async_cow;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        unsigned long nr_pages;
        u64 cur_end;
        int limit = 10 * 1024 * 1024;

        clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
                         1, 0, NULL, GFP_NOFS);
        while (start < end) {
                async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
                BUG_ON(!async_cow); /* -ENOMEM */
                async_cow->inode = igrab(inode);
                async_cow->root = root;
                async_cow->locked_page = locked_page;
                async_cow->start = start;

                if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
                    !btrfs_test_opt(root, FORCE_COMPRESS))
                        cur_end = end;
                else
                        cur_end = min(end, start + 512 * 1024 - 1);

                async_cow->end = cur_end;
                INIT_LIST_HEAD(&async_cow->extents);

                btrfs_init_work(&async_cow->work,
                                btrfs_delalloc_helper,
                                async_cow_start, async_cow_submit,
                                async_cow_free);

                nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
                        PAGE_CACHE_SHIFT;
                atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

                btrfs_queue_work(root->fs_info->delalloc_workers,
                                 &async_cow->work);

                if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
                        wait_event(root->fs_info->async_submit_wait,
                           (atomic_read(&root->fs_info->async_delalloc_pages) <
                            limit));
                }

                while (atomic_read(&root->fs_info->async_submit_draining) &&
                      atomic_read(&root->fs_info->async_delalloc_pages)) {
                        wait_event(root->fs_info->async_submit_wait,
                          (atomic_read(&root->fs_info->async_delalloc_pages) ==
                           0));
                }

                *nr_written += nr_pages;
                start = cur_end + 1;
        }
        *page_started = 1;
        return 0;
}
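
/*
 * Illustrative sketch, not part of the original file: how the loop above
 * picks the end of each async_cow chunk.  With compression possible, a
 * 2MiB range becomes four 512K work items; with NOCOMPRESS set (and no
 * compress-force mount option) the whole range goes in one item.  The
 * helper name is hypothetical.
 */
static inline u64 __maybe_unused example_async_chunk_end(u64 start, u64 end,
                                                         bool may_compress)
{
        return may_compress ? min(end, start + 512 * 1024 - 1) : end;
}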

static noinline int csum_exist_in_range(struct btrfs_root *root,
                                        u64 bytenr, u64 num_bytes)
{
        int ret;
        struct btrfs_ordered_sum *sums;
        LIST_HEAD(list);

        ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
                                       bytenr + num_bytes - 1, &list, 0);
        if (ret == 0 && list_empty(&list))
                return 0;

        while (!list_empty(&list)) {
                sums = list_entry(list.next, struct btrfs_ordered_sum, list);
                list_del(&sums->list);
                kfree(sums);
        }
        return 1;
}

/*
 * called during nocow writeback.  This checks for snapshots or COW copies
 * of the extents that exist in the file, and COWs the file as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
                                       struct page *locked_page,
                              u64 start, u64 end, int *page_started, int force,
                              unsigned long *nr_written)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        struct extent_buffer *leaf;
        struct btrfs_path *path;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key found_key;
        u64 cow_start;
        u64 cur_offset;
        u64 extent_end;
        u64 extent_offset;
        u64 disk_bytenr;
        u64 num_bytes;
        u64 disk_num_bytes;
        u64 ram_bytes;
        int extent_type;
        int ret, err;
        int type;
        int nocow;
        int check_prev = 1;
        bool nolock;
        u64 ino = btrfs_ino(inode);

        path = btrfs_alloc_path();
        if (!path) {
                extent_clear_unlock_delalloc(inode, start, end, locked_page,
                                             EXTENT_LOCKED | EXTENT_DELALLOC |
                                             EXTENT_DO_ACCOUNTING |
                                             EXTENT_DEFRAG, PAGE_UNLOCK |
                                             PAGE_CLEAR_DIRTY |
                                             PAGE_SET_WRITEBACK |
                                             PAGE_END_WRITEBACK);
                return -ENOMEM;
        }

        nolock = btrfs_is_free_space_inode(inode);

        if (nolock)
                trans = btrfs_join_transaction_nolock(root);
        else
                trans = btrfs_join_transaction(root);

        if (IS_ERR(trans)) {
                extent_clear_unlock_delalloc(inode, start, end, locked_page,
                                             EXTENT_LOCKED | EXTENT_DELALLOC |
                                             EXTENT_DO_ACCOUNTING |
                                             EXTENT_DEFRAG, PAGE_UNLOCK |
                                             PAGE_CLEAR_DIRTY |
                                             PAGE_SET_WRITEBACK |
                                             PAGE_END_WRITEBACK);
                btrfs_free_path(path);
                return PTR_ERR(trans);
        }

        trans->block_rsv = &root->fs_info->delalloc_block_rsv;

        cow_start = (u64)-1;
        cur_offset = start;
        while (1) {
                ret = btrfs_lookup_file_extent(trans, root, path, ino,
                                               cur_offset, 0);
                if (ret < 0)
                        goto error;
                if (ret > 0 && path->slots[0] > 0 && check_prev) {
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0] - 1);
                        if (found_key.objectid == ino &&
                            found_key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }
                check_prev = 0;
next_slot:
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                goto error;
                        if (ret > 0)
                                break;
                        leaf = path->nodes[0];
                }

                nocow = 0;
                disk_bytenr = 0;
                num_bytes = 0;
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

                if (found_key.objectid > ino)
                        break;
                if (WARN_ON_ONCE(found_key.objectid < ino) ||
                    found_key.type < BTRFS_EXTENT_DATA_KEY) {
                        path->slots[0]++;
                        goto next_slot;
                }
                if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
                    found_key.offset > end)
                        break;

                if (found_key.offset > cur_offset) {
                        extent_end = found_key.offset;
                        extent_type = 0;
                        goto out_check;
                }

                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);
                extent_type = btrfs_file_extent_type(leaf, fi);

                ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
                if (extent_type == BTRFS_FILE_EXTENT_REG ||
                    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
                        disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
                        extent_offset = btrfs_file_extent_offset(leaf, fi);
                        extent_end = found_key.offset +
                                btrfs_file_extent_num_bytes(leaf, fi);
                        disk_num_bytes =
                                btrfs_file_extent_disk_num_bytes(leaf, fi);
                        if (extent_end <= start) {
                                path->slots[0]++;
                                goto next_slot;
                        }
                        if (disk_bytenr == 0)
                                goto out_check;
                        if (btrfs_file_extent_compression(leaf, fi) ||
                            btrfs_file_extent_encryption(leaf, fi) ||
                            btrfs_file_extent_other_encoding(leaf, fi))
                                goto out_check;
                        if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
                                goto out_check;
                        if (btrfs_extent_readonly(root, disk_bytenr))
1357                                 goto out_check;
1358                         if (btrfs_cross_ref_exist(trans, root, ino,
1359                                                   found_key.offset -
1360                                                   extent_offset, disk_bytenr))
1361                                 goto out_check;
1362                         disk_bytenr += extent_offset;
1363                         disk_bytenr += cur_offset - found_key.offset;
1364                         num_bytes = min(end + 1, extent_end) - cur_offset;
1365                         /*
1366                          * if there are pending snapshots for this root,
1367                          * we fall back to the common COW path.
1368                          */
1369                         if (!nolock) {
1370                                 err = btrfs_start_write_no_snapshoting(root);
1371                                 if (!err)
1372                                         goto out_check;
1373                         }
1374                         /*
1375                          * Force COW if a csum exists in the range.
1376                          * This ensures that the csums for a given extent
1377                          * are either valid or do not exist.
1378                          */
1379                         if (csum_exist_in_range(root, disk_bytenr, num_bytes))
1380                                 goto out_check;
1381                         nocow = 1;
1382                 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1383                         extent_end = found_key.offset +
1384                                 btrfs_file_extent_inline_len(leaf,
1385                                                      path->slots[0], fi);
1386                         extent_end = ALIGN(extent_end, root->sectorsize);
1387                 } else {
1388                         BUG_ON(1);
1389                 }
1390 out_check:
1391                 if (extent_end <= start) {
1392                         path->slots[0]++;
1393                         if (!nolock && nocow)
1394                                 btrfs_end_write_no_snapshoting(root);
1395                         goto next_slot;
1396                 }
1397                 if (!nocow) {
1398                         if (cow_start == (u64)-1)
1399                                 cow_start = cur_offset;
1400                         cur_offset = extent_end;
1401                         if (cur_offset > end)
1402                                 break;
1403                         path->slots[0]++;
1404                         goto next_slot;
1405                 }
1406
1407                 btrfs_release_path(path);
1408                 if (cow_start != (u64)-1) {
1409                         ret = cow_file_range(inode, locked_page,
1410                                              cow_start, found_key.offset - 1,
1411                                              page_started, nr_written, 1);
1412                         if (ret) {
1413                                 if (!nolock && nocow)
1414                                         btrfs_end_write_no_snapshoting(root);
1415                                 goto error;
1416                         }
1417                         cow_start = (u64)-1;
1418                 }
1419
1420                 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1421                         struct extent_map *em;
1422                         struct extent_map_tree *em_tree;
1423                         em_tree = &BTRFS_I(inode)->extent_tree;
1424                         em = alloc_extent_map();
1425                         BUG_ON(!em); /* -ENOMEM */
1426                         em->start = cur_offset;
1427                         em->orig_start = found_key.offset - extent_offset;
1428                         em->len = num_bytes;
1429                         em->block_len = num_bytes;
1430                         em->block_start = disk_bytenr;
1431                         em->orig_block_len = disk_num_bytes;
1432                         em->ram_bytes = ram_bytes;
1433                         em->bdev = root->fs_info->fs_devices->latest_bdev;
1434                         em->mod_start = em->start;
1435                         em->mod_len = em->len;
1436                         set_bit(EXTENT_FLAG_PINNED, &em->flags);
1437                         set_bit(EXTENT_FLAG_FILLING, &em->flags);
1438                         em->generation = -1;
1439                         while (1) {
1440                                 write_lock(&em_tree->lock);
1441                                 ret = add_extent_mapping(em_tree, em, 1);
1442                                 write_unlock(&em_tree->lock);
1443                                 if (ret != -EEXIST) {
1444                                         free_extent_map(em);
1445                                         break;
1446                                 }
1447                                 btrfs_drop_extent_cache(inode, em->start,
1448                                                 em->start + em->len - 1, 0);
1449                         }
1450                         type = BTRFS_ORDERED_PREALLOC;
1451                 } else {
1452                         type = BTRFS_ORDERED_NOCOW;
1453                 }
1454
1455                 ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1456                                                num_bytes, num_bytes, type);
1457                 BUG_ON(ret); /* -ENOMEM */
1458
1459                 if (root->root_key.objectid ==
1460                     BTRFS_DATA_RELOC_TREE_OBJECTID) {
1461                         ret = btrfs_reloc_clone_csums(inode, cur_offset,
1462                                                       num_bytes);
1463                         if (ret) {
1464                                 if (!nolock && nocow)
1465                                         btrfs_end_write_no_snapshoting(root);
1466                                 goto error;
1467                         }
1468                 }
1469
1470                 extent_clear_unlock_delalloc(inode, cur_offset,
1471                                              cur_offset + num_bytes - 1,
1472                                              locked_page, EXTENT_LOCKED |
1473                                              EXTENT_DELALLOC, PAGE_UNLOCK |
1474                                              PAGE_SET_PRIVATE2);
1475                 if (!nolock && nocow)
1476                         btrfs_end_write_no_snapshoting(root);
1477                 cur_offset = extent_end;
1478                 if (cur_offset > end)
1479                         break;
1480         }
1481         btrfs_release_path(path);
1482
1483         if (cur_offset <= end && cow_start == (u64)-1) {
1484                 cow_start = cur_offset;
1485                 cur_offset = end;
1486         }
1487
1488         if (cow_start != (u64)-1) {
1489                 ret = cow_file_range(inode, locked_page, cow_start, end,
1490                                      page_started, nr_written, 1);
1491                 if (ret)
1492                         goto error;
1493         }
1494
1495 error:
1496         err = btrfs_end_transaction(trans, root);
1497         if (!ret)
1498                 ret = err;
1499
1500         if (ret && cur_offset < end)
1501                 extent_clear_unlock_delalloc(inode, cur_offset, end,
1502                                              locked_page, EXTENT_LOCKED |
1503                                              EXTENT_DELALLOC | EXTENT_DEFRAG |
1504                                              EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
1505                                              PAGE_CLEAR_DIRTY |
1506                                              PAGE_SET_WRITEBACK |
1507                                              PAGE_END_WRITEBACK);
1508         btrfs_free_path(path);
1509         return ret;
1510 }
1511
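/*
 * Condensed view of the per-extent decision made above (illustrative
 * sketch, not part of the original file):
 *
 *   on-disk REG (force only) or PREALLOC extent that is not compressed,
 *   encrypted or otherwise encoded, not in a read-only block group, not
 *   shared by other references, has no csums in the range, and has no
 *   pending snapshot     ->  nocow: write straight to disk_bytenr
 *   anything else        ->  accumulate into [cow_start, extent_end)
 *                            and hand it to cow_file_range()
 */
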
1512 static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
1513 {
1514
1515         if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
1516             !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC))
1517                 return 0;
1518
1519         /*
1520          * @defrag_bytes is a hint value read without a spinlock;
1521          * if it is not zero, the file is being defragged.
1522          * Force COW if the given extent needs to be defragged.
1523          */
1524         if (BTRFS_I(inode)->defrag_bytes &&
1525             test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
1526                            EXTENT_DEFRAG, 0, NULL))
1527                 return 1;
1528
1529         return 0;
1530 }
1531
1532 /*
1533  * extent_io.c callback to do delayed allocation processing
1534  */
1535 static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1536                               u64 start, u64 end, int *page_started,
1537                               unsigned long *nr_written)
1538 {
1539         int ret;
1540         int force_cow = need_force_cow(inode, start, end);
1541
1542         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) {
1543                 ret = run_delalloc_nocow(inode, locked_page, start, end,
1544                                          page_started, 1, nr_written);
1545         } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
1546                 ret = run_delalloc_nocow(inode, locked_page, start, end,
1547                                          page_started, 0, nr_written);
1548         } else if (!inode_need_compress(inode)) {
1549                 ret = cow_file_range(inode, locked_page, start, end,
1550                                       page_started, nr_written, 1);
1551         } else {
1552                 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1553                         &BTRFS_I(inode)->runtime_flags);
1554                 ret = cow_file_range_async(inode, locked_page, start, end,
1555                                            page_started, nr_written);
1556         }
1557         return ret;
1558 }
1559
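/*
 * Dispatch summary for run_delalloc_range() above (illustrative, not
 * part of the original file):
 *
 *   NODATACOW set, cow not forced  -> run_delalloc_nocow(force=1)
 *   PREALLOC set,  cow not forced  -> run_delalloc_nocow(force=0)
 *   compression not wanted         -> cow_file_range()
 *   otherwise                      -> cow_file_range_async(), which
 *                                     compresses in worker threads
 */
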
1560 static void btrfs_split_extent_hook(struct inode *inode,
1561                                     struct extent_state *orig, u64 split)
1562 {
1563         u64 size;
1564
1565         /* not delalloc, ignore it */
1566         if (!(orig->state & EXTENT_DELALLOC))
1567                 return;
1568
1569         size = orig->end - orig->start + 1;
1570         if (size > BTRFS_MAX_EXTENT_SIZE) {
1571                 u64 num_extents;
1572                 u64 new_size;
1573
1574                 /*
1575                  * See the explanation in btrfs_merge_extent_hook, the same
1576                  * applies here, just in reverse.
1577                  */
1578                 new_size = orig->end - split + 1;
1579                 num_extents = div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
1580                                         BTRFS_MAX_EXTENT_SIZE);
1581                 new_size = split - orig->start;
1582                 num_extents += div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
1583                                         BTRFS_MAX_EXTENT_SIZE);
1584                 if (div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1,
1585                               BTRFS_MAX_EXTENT_SIZE) >= num_extents)
1586                         return;
1587         }
1588
1589         spin_lock(&BTRFS_I(inode)->lock);
1590         BTRFS_I(inode)->outstanding_extents++;
1591         spin_unlock(&BTRFS_I(inode)->lock);
1592 }
1593
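/*
 * Standalone demo of the split accounting above (an illustrative sketch
 * in user-space C, not part of this file; it assumes
 * BTRFS_MAX_EXTENT_SIZE is 128M as in this kernel).  A split only costs
 * an extra outstanding extent when the two pieces round up to more
 * extents than the original range already accounted for.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_EXTENT (128ULL * 1024 * 1024)       /* BTRFS_MAX_EXTENT_SIZE */

/* same rounding as div64_u64(len + BTRFS_MAX_EXTENT_SIZE - 1, ...) */
static uint64_t count_extents(uint64_t len)
{
        return (len + MAX_EXTENT - 1) / MAX_EXTENT;
}

int main(void)
{
        uint64_t size = 2 * MAX_EXTENT;
        uint64_t split = 100ULL * 1024 * 1024;  /* unaligned split point */

        /* aligned split: 1 + 1 == 2 extents, nothing extra to account */
        printf("aligned:   %llu -> %llu\n",
               (unsigned long long)count_extents(size),
               (unsigned long long)(count_extents(MAX_EXTENT) +
                                    count_extents(size - MAX_EXTENT)));

        /* unaligned split: 1 + 2 == 3 > 2, one extra outstanding extent */
        printf("unaligned: %llu -> %llu\n",
               (unsigned long long)count_extents(size),
               (unsigned long long)(count_extents(split) +
                                    count_extents(size - split)));
        return 0;
}
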
1594 /*
1595  * extent_io.c merge_extent_hook, used to track merged delayed allocation
1596  * extents.  When new extents are merged onto old ones, as happens during
1597  * sequential writes, this lets us properly account for the metadata space
1598  * we'll need.
1599  */
1600 static void btrfs_merge_extent_hook(struct inode *inode,
1601                                     struct extent_state *new,
1602                                     struct extent_state *other)
1603 {
1604         u64 new_size, old_size;
1605         u64 num_extents;
1606
1607         /* not delalloc, ignore it */
1608         if (!(other->state & EXTENT_DELALLOC))
1609                 return;
1610
1611         if (new->start > other->start)
1612                 new_size = new->end - other->start + 1;
1613         else
1614                 new_size = other->end - new->start + 1;
1615
1616         /* we're not bigger than the max, unreserve the space and go */
1617         if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
1618                 spin_lock(&BTRFS_I(inode)->lock);
1619                 BTRFS_I(inode)->outstanding_extents--;
1620                 spin_unlock(&BTRFS_I(inode)->lock);
1621                 return;
1622         }
1623
1624         /*
1625          * We have to add up either side to figure out how many extents were
1626          * accounted for before we merged into one big extent.  If the number of
1627          * extents we accounted for is <= the amount we need for the new range
1628          * then we can return, otherwise drop.  Think of it like this
1629          *
1630          * [ 4k][MAX_SIZE]
1631          *
1632          * So we've grown the extent by a MAX_SIZE extent, this would mean we
1633          * need 2 outstanding extents, on one side we have 1 and the other side
1634          * we have 1 so they are == and we can return.  But in this case
1635          *
1636          * [MAX_SIZE+4k][MAX_SIZE+4k]
1637          *
1638          * Each range on their own accounts for 2 extents, but merged together
1639          * they are only 3 extents worth of accounting, so we need to drop in
1640          * this case.
1641          */
1642         old_size = other->end - other->start + 1;
1643         num_extents = div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1,
1644                                 BTRFS_MAX_EXTENT_SIZE);
1645         old_size = new->end - new->start + 1;
1646         num_extents += div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1,
1647                                  BTRFS_MAX_EXTENT_SIZE);
1648
1649         if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
1650                       BTRFS_MAX_EXTENT_SIZE) >= num_extents)
1651                 return;
1652
1653         spin_lock(&BTRFS_I(inode)->lock);
1654         BTRFS_I(inode)->outstanding_extents--;
1655         spin_unlock(&BTRFS_I(inode)->lock);
1656 }
1657
1658 static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
1659                                       struct inode *inode)
1660 {
1661         spin_lock(&root->delalloc_lock);
1662         if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1663                 list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1664                               &root->delalloc_inodes);
1665                 set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1666                         &BTRFS_I(inode)->runtime_flags);
1667                 root->nr_delalloc_inodes++;
1668                 if (root->nr_delalloc_inodes == 1) {
1669                         spin_lock(&root->fs_info->delalloc_root_lock);
1670                         BUG_ON(!list_empty(&root->delalloc_root));
1671                         list_add_tail(&root->delalloc_root,
1672                                       &root->fs_info->delalloc_roots);
1673                         spin_unlock(&root->fs_info->delalloc_root_lock);
1674                 }
1675         }
1676         spin_unlock(&root->delalloc_lock);
1677 }
1678
1679 static void btrfs_del_delalloc_inode(struct btrfs_root *root,
1680                                      struct inode *inode)
1681 {
1682         spin_lock(&root->delalloc_lock);
1683         if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1684                 list_del_init(&BTRFS_I(inode)->delalloc_inodes);
1685                 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1686                           &BTRFS_I(inode)->runtime_flags);
1687                 root->nr_delalloc_inodes--;
1688                 if (!root->nr_delalloc_inodes) {
1689                         spin_lock(&root->fs_info->delalloc_root_lock);
1690                         BUG_ON(list_empty(&root->delalloc_root));
1691                         list_del_init(&root->delalloc_root);
1692                         spin_unlock(&root->fs_info->delalloc_root_lock);
1693                 }
1694         }
1695         spin_unlock(&root->delalloc_lock);
1696 }
1697
1698 /*
1699  * extent_io.c set_bit_hook, used to track delayed allocation
1700  * bytes in this file, and to maintain the list of inodes that
1701  * have pending delalloc work to be done.
1702  */
1703 static void btrfs_set_bit_hook(struct inode *inode,
1704                                struct extent_state *state, unsigned *bits)
1705 {
1706
1707         if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
1708                 WARN_ON(1);
1709         /*
1710          * set_bit and clear_bit hooks normally require _irqsave/restore
1711          * but in this case, we are only testing for the DELALLOC
1712          * bit, which is only set or cleared with irqs on
1713          */
1714         if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1715                 struct btrfs_root *root = BTRFS_I(inode)->root;
1716                 u64 len = state->end + 1 - state->start;
1717                 bool do_list = !btrfs_is_free_space_inode(inode);
1718
1719                 if (*bits & EXTENT_FIRST_DELALLOC) {
1720                         *bits &= ~EXTENT_FIRST_DELALLOC;
1721                 } else {
1722                         spin_lock(&BTRFS_I(inode)->lock);
1723                         BTRFS_I(inode)->outstanding_extents++;
1724                         spin_unlock(&BTRFS_I(inode)->lock);
1725                 }
1726
1727                 /* For sanity tests */
1728                 if (btrfs_test_is_dummy_root(root))
1729                         return;
1730
1731                 __percpu_counter_add(&root->fs_info->delalloc_bytes, len,
1732                                      root->fs_info->delalloc_batch);
1733                 spin_lock(&BTRFS_I(inode)->lock);
1734                 BTRFS_I(inode)->delalloc_bytes += len;
1735                 if (*bits & EXTENT_DEFRAG)
1736                         BTRFS_I(inode)->defrag_bytes += len;
1737                 if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1738                                          &BTRFS_I(inode)->runtime_flags))
1739                         btrfs_add_delalloc_inodes(root, inode);
1740                 spin_unlock(&BTRFS_I(inode)->lock);
1741         }
1742 }
1743
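/*
 * Note on EXTENT_FIRST_DELALLOC above (illustrative, not part of the
 * original file): the first time a reserved range has DELALLOC set, the
 * outstanding extent was already counted by the reservation path, so the
 * hook strips the bit and skips the increment; only later splits and
 * re-dirtying bump the count here.
 */
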
1744 /*
1745  * extent_io.c clear_bit_hook, see set_bit_hook for why
1746  */
1747 static void btrfs_clear_bit_hook(struct inode *inode,
1748                                  struct extent_state *state,
1749                                  unsigned *bits)
1750 {
1751         u64 len = state->end + 1 - state->start;
1752         u64 num_extents = div64_u64(len + BTRFS_MAX_EXTENT_SIZE - 1,
1753                                     BTRFS_MAX_EXTENT_SIZE);
1754
1755         spin_lock(&BTRFS_I(inode)->lock);
1756         if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG))
1757                 BTRFS_I(inode)->defrag_bytes -= len;
1758         spin_unlock(&BTRFS_I(inode)->lock);
1759
1760         /*
1761          * set_bit and clear_bit hooks normally require _irqsave/restore
1762          * but in this case, we are only testing for the DELALLOC
1763          * bit, which is only set or cleared with irqs on
1764          */
1765         if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1766                 struct btrfs_root *root = BTRFS_I(inode)->root;
1767                 bool do_list = !btrfs_is_free_space_inode(inode);
1768
1769                 if (*bits & EXTENT_FIRST_DELALLOC) {
1770                         *bits &= ~EXTENT_FIRST_DELALLOC;
1771                 } else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
1772                         spin_lock(&BTRFS_I(inode)->lock);
1773                         BTRFS_I(inode)->outstanding_extents -= num_extents;
1774                         spin_unlock(&BTRFS_I(inode)->lock);
1775                 }
1776
1777                 /*
1778                  * We don't reserve metadata space for space cache inodes so we
1779                  * don't need to call btrfs_delalloc_release_metadata if there is an
1780                  * error.
1781                  */
1782                 if (*bits & EXTENT_DO_ACCOUNTING &&
1783                     root != root->fs_info->tree_root)
1784                         btrfs_delalloc_release_metadata(inode, len);
1785
1786                 /* For sanity tests. */
1787                 if (btrfs_test_is_dummy_root(root))
1788                         return;
1789
1790                 if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
1791                     && do_list && !(state->state & EXTENT_NORESERVE))
1792                         btrfs_free_reserved_data_space_noquota(inode,
1793                                         state->start, len);
1794
1795                 __percpu_counter_add(&root->fs_info->delalloc_bytes, -len,
1796                                      root->fs_info->delalloc_batch);
1797                 spin_lock(&BTRFS_I(inode)->lock);
1798                 BTRFS_I(inode)->delalloc_bytes -= len;
1799                 if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
1800                     test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1801                              &BTRFS_I(inode)->runtime_flags))
1802                         btrfs_del_delalloc_inode(root, inode);
1803                 spin_unlock(&BTRFS_I(inode)->lock);
1804         }
1805 }
1806
1807 /*
1808  * extent_io.c merge_bio_hook; this must check the chunk tree to make sure
1809  * we don't create bios that span stripes or chunks
1810  */
1811 int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
1812                          size_t size, struct bio *bio,
1813                          unsigned long bio_flags)
1814 {
1815         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1816         u64 logical = (u64)bio->bi_iter.bi_sector << 9;
1817         u64 length = 0;
1818         u64 map_length;
1819         int ret;
1820
1821         if (bio_flags & EXTENT_BIO_COMPRESSED)
1822                 return 0;
1823
1824         length = bio->bi_iter.bi_size;
1825         map_length = length;
1826         ret = btrfs_map_block(root->fs_info, rw, logical,
1827                               &map_length, NULL, 0);
1828         /* Will always return 0 with bbio_ret == NULL */
1829         BUG_ON(ret < 0);
1830         if (map_length < length + size)
1831                 return 1;
1832         return 0;
1833 }
1834
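/*
 * Illustrative caller pattern for the hook above (a sketch; the helper
 * names submit_current_bio()/start_new_bio() are hypothetical stand-ins
 * for what extent_io.c does when growing a bio page by page):
 *
 *      if (btrfs_merge_bio_hook(rw, page, offset, PAGE_CACHE_SIZE,
 *                               bio, bio_flags)) {
 *              submit_current_bio(bio);        // would span a stripe
 *              bio = start_new_bio(page);      // start a fresh one
 *      }
 */
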
1835 /*
1836  * in order to insert checksums into the metadata in large chunks,
1837  * we wait until bio submission time.  All the pages in the bio are
1838  * checksummed and sums are attached onto the ordered extent record.
1839  *
1840  * At IO completion time the csums attached to the ordered extent record
1841  * are inserted into the btree.
1842  */
1843 static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1844                                     struct bio *bio, int mirror_num,
1845                                     unsigned long bio_flags,
1846                                     u64 bio_offset)
1847 {
1848         struct btrfs_root *root = BTRFS_I(inode)->root;
1849         int ret = 0;
1850
1851         ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1852         BUG_ON(ret); /* -ENOMEM */
1853         return 0;
1854 }
1855
1856 /*
1857  * The second half of the async write checksumming.  By the time this
1858  * runs, __btrfs_submit_bio_start has already checksummed the pages and
1859  * attached the sums onto the ordered extent record.
1860  *
1861  * At IO completion time the csums attached to the ordered extent record
1862  * are inserted into the btree.
1863  */
1864 static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1865                           int mirror_num, unsigned long bio_flags,
1866                           u64 bio_offset)
1867 {
1868         struct btrfs_root *root = BTRFS_I(inode)->root;
1869         int ret;
1870
1871         ret = btrfs_map_bio(root, rw, bio, mirror_num, 1);
1872         if (ret) {
1873                 bio->bi_error = ret;
1874                 bio_endio(bio);
1875         }
1876         return ret;
1877 }
1878
1879 /*
1880  * extent_io.c submission hook. This does the right thing for csum calculation
1881  * on write, or reading the csums from the tree before a read
1882  */
1883 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1884                           int mirror_num, unsigned long bio_flags,
1885                           u64 bio_offset)
1886 {
1887         struct btrfs_root *root = BTRFS_I(inode)->root;
1888         enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
1889         int ret = 0;
1890         int skip_sum;
1891         int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
1892
1893         skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1894
1895         if (btrfs_is_free_space_inode(inode))
1896                 metadata = BTRFS_WQ_ENDIO_FREE_SPACE;
1897
1898         if (!(rw & REQ_WRITE)) {
1899                 ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
1900                 if (ret)
1901                         goto out;
1902
1903                 if (bio_flags & EXTENT_BIO_COMPRESSED) {
1904                         ret = btrfs_submit_compressed_read(inode, bio,
1905                                                            mirror_num,
1906                                                            bio_flags);
1907                         goto out;
1908                 } else if (!skip_sum) {
1909                         ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
1910                         if (ret)
1911                                 goto out;
1912                 }
1913                 goto mapit;
1914         } else if (async && !skip_sum) {
1915                 /* csum items have already been cloned */
1916                 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1917                         goto mapit;
1918                 /* we're doing a write, do the async checksumming */
1919                 ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
1920                                    inode, rw, bio, mirror_num,
1921                                    bio_flags, bio_offset,
1922                                    __btrfs_submit_bio_start,
1923                                    __btrfs_submit_bio_done);
1924                 goto out;
1925         } else if (!skip_sum) {
1926                 ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1927                 if (ret)
1928                         goto out;
1929         }
1930
1931 mapit:
1932         ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
1933
1934 out:
1935         if (ret < 0) {
1936                 bio->bi_error = ret;
1937                 bio_endio(bio);
1938         }
1939         return ret;
1940 }
1941
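/*
 * Routing summary for btrfs_submit_bio_hook() above (illustrative, not
 * part of the original file):
 *
 *   read,  compressed          -> btrfs_submit_compressed_read()
 *   read,  csums enabled       -> btrfs_lookup_bio_sums(), then map
 *   write, async and csums     -> btrfs_wq_submit_bio() for async csums
 *   write, sync  and csums     -> btrfs_csum_one_bio() inline, then map
 *   write, data reloc root     -> map directly (csums already cloned)
 *   csums disabled             -> btrfs_map_bio() directly
 */
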
1942 /*
1943  * given a list of ordered sums, record them in the inode.  This happens
1944  * at IO completion time based on sums calculated at bio submission time.
1945  */
1946 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1947                              struct inode *inode, u64 file_offset,
1948                              struct list_head *list)
1949 {
1950         struct btrfs_ordered_sum *sum;
1951
1952         list_for_each_entry(sum, list, list) {
1953                 trans->adding_csums = 1;
1954                 btrfs_csum_file_blocks(trans,
1955                        BTRFS_I(inode)->root->fs_info->csum_root, sum);
1956                 trans->adding_csums = 0;
1957         }
1958         return 0;
1959 }
1960
1961 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
1962                               struct extent_state **cached_state)
1963 {
1964         WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0);
1965         return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1966                                    cached_state, GFP_NOFS);
1967 }
1968
1969 /* see btrfs_writepage_start_hook for details on why this is required */
1970 struct btrfs_writepage_fixup {
1971         struct page *page;
1972         struct btrfs_work work;
1973 };
1974
1975 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1976 {
1977         struct btrfs_writepage_fixup *fixup;
1978         struct btrfs_ordered_extent *ordered;
1979         struct extent_state *cached_state = NULL;
1980         struct page *page;
1981         struct inode *inode;
1982         u64 page_start;
1983         u64 page_end;
1984         int ret;
1985
1986         fixup = container_of(work, struct btrfs_writepage_fixup, work);
1987         page = fixup->page;
1988 again:
1989         lock_page(page);
1990         if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
1991                 ClearPageChecked(page);
1992                 goto out_page;
1993         }
1994
1995         inode = page->mapping->host;
1996         page_start = page_offset(page);
1997         page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1998
1999         lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
2000                          &cached_state);
2001
2002         /* already ordered? We're done */
2003         if (PagePrivate2(page))
2004                 goto out;
2005
2006         ordered = btrfs_lookup_ordered_extent(inode, page_start);
2007         if (ordered) {
2008                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
2009                                      page_end, &cached_state, GFP_NOFS);
2010                 unlock_page(page);
2011                 btrfs_start_ordered_extent(inode, ordered, 1);
2012                 btrfs_put_ordered_extent(ordered);
2013                 goto again;
2014         }
2015
2016         ret = btrfs_delalloc_reserve_space(inode, page_start,
2017                                            PAGE_CACHE_SIZE);
2018         if (ret) {
2019                 mapping_set_error(page->mapping, ret);
2020                 end_extent_writepage(page, ret, page_start, page_end);
2021                 ClearPageChecked(page);
2022                 goto out;
2023         }
2024
2025         btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
2026         ClearPageChecked(page);
2027         set_page_dirty(page);
2028 out:
2029         unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
2030                              &cached_state, GFP_NOFS);
2031 out_page:
2032         unlock_page(page);
2033         page_cache_release(page);
2034         kfree(fixup);
2035 }
2036
2037 /*
2038  * There are a few paths in the higher layers of the kernel that directly
2039  * set the page dirty bit without asking the filesystem if it is a
2040  * good idea.  This causes problems because we want to make sure COW
2041  * properly happens and the data=ordered rules are followed.
2042  *
2043  * In our case any range that doesn't have the ORDERED bit set
2044  * hasn't been properly set up for IO.  We kick off an async process
2045  * to fix it up.  The async helper will wait for ordered extents, set
2046  * the delalloc bit and make it safe to write the page.
2047  */
2048 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
2049 {
2050         struct inode *inode = page->mapping->host;
2051         struct btrfs_writepage_fixup *fixup;
2052         struct btrfs_root *root = BTRFS_I(inode)->root;
2053
2054         /* this page is properly in the ordered list */
2055         if (TestClearPagePrivate2(page))
2056                 return 0;
2057
2058         if (PageChecked(page))
2059                 return -EAGAIN;
2060
2061         fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
2062         if (!fixup)
2063                 return -EAGAIN;
2064
2065         SetPageChecked(page);
2066         page_cache_get(page);
2067         btrfs_init_work(&fixup->work, btrfs_fixup_helper,
2068                         btrfs_writepage_fixup_worker, NULL, NULL);
2069         fixup->page = page;
2070         btrfs_queue_work(root->fs_info->fixup_workers, &fixup->work);
2071         return -EBUSY;
2072 }
2073
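/*
 * Return values above, as the writepage path sees them (illustrative,
 * not part of the original file):
 *   0       Private2 (ORDERED) was set; the page is safe to write now
 *   -EAGAIN the page is already queued for fixup, or the fixup could
 *           not be allocated; the caller redirties and retries later
 *   -EBUSY  a fixup worker was queued; skip writing this page for now
 */
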
2074 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
2075                                        struct inode *inode, u64 file_pos,
2076                                        u64 disk_bytenr, u64 disk_num_bytes,
2077                                        u64 num_bytes, u64 ram_bytes,
2078                                        u8 compression, u8 encryption,
2079                                        u16 other_encoding, int extent_type)
2080 {
2081         struct btrfs_root *root = BTRFS_I(inode)->root;
2082         struct btrfs_file_extent_item *fi;
2083         struct btrfs_path *path;
2084         struct extent_buffer *leaf;
2085         struct btrfs_key ins;
2086         int extent_inserted = 0;
2087         int ret;
2088
2089         path = btrfs_alloc_path();
2090         if (!path)
2091                 return -ENOMEM;
2092
2093         /*
2094          * we may be replacing one extent in the tree with another.
2095          * The new extent is pinned in the extent map, and we don't want
2096          * to drop it from the cache until it is completely in the btree.
2097          *
2098          * So, tell btrfs_drop_extents to leave this extent in the cache.
2099          * The caller is expected to unpin it and allow it to be merged
2100          * with the others.
2101          */
2102         ret = __btrfs_drop_extents(trans, root, inode, path, file_pos,
2103                                    file_pos + num_bytes, NULL, 0,
2104                                    1, sizeof(*fi), &extent_inserted);
2105         if (ret)
2106                 goto out;
2107
2108         if (!extent_inserted) {
2109                 ins.objectid = btrfs_ino(inode);
2110                 ins.offset = file_pos;
2111                 ins.type = BTRFS_EXTENT_DATA_KEY;
2112
2113                 path->leave_spinning = 1;
2114                 ret = btrfs_insert_empty_item(trans, root, path, &ins,
2115                                               sizeof(*fi));
2116                 if (ret)
2117                         goto out;
2118         }
2119         leaf = path->nodes[0];
2120         fi = btrfs_item_ptr(leaf, path->slots[0],
2121                             struct btrfs_file_extent_item);
2122         btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2123         btrfs_set_file_extent_type(leaf, fi, extent_type);
2124         btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
2125         btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
2126         btrfs_set_file_extent_offset(leaf, fi, 0);
2127         btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2128         btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
2129         btrfs_set_file_extent_compression(leaf, fi, compression);
2130         btrfs_set_file_extent_encryption(leaf, fi, encryption);
2131         btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
2132
2133         btrfs_mark_buffer_dirty(leaf);
2134         btrfs_release_path(path);
2135
2136         inode_add_bytes(inode, num_bytes);
2137
2138         ins.objectid = disk_bytenr;
2139         ins.offset = disk_num_bytes;
2140         ins.type = BTRFS_EXTENT_ITEM_KEY;
2141         ret = btrfs_alloc_reserved_file_extent(trans, root,
2142                                         root->root_key.objectid,
2143                                         btrfs_ino(inode), file_pos,
2144                                         ram_bytes, &ins);
2145         /*
2146          * Release the reserved range from inode dirty range map, as it is
2147          * already moved into delayed_ref_head
2148          */
2149         btrfs_qgroup_release_data(inode, file_pos, ram_bytes);
2150 out:
2151         btrfs_free_path(path);
2152
2153         return ret;
2154 }
2155
2156 /* snapshot-aware defrag */
2157 struct sa_defrag_extent_backref {
2158         struct rb_node node;
2159         struct old_sa_defrag_extent *old;
2160         u64 root_id;
2161         u64 inum;
2162         u64 file_pos;
2163         u64 extent_offset;
2164         u64 num_bytes;
2165         u64 generation;
2166 };
2167
2168 struct old_sa_defrag_extent {
2169         struct list_head list;
2170         struct new_sa_defrag_extent *new;
2171
2172         u64 extent_offset;
2173         u64 bytenr;
2174         u64 offset;
2175         u64 len;
2176         int count;
2177 };
2178
2179 struct new_sa_defrag_extent {
2180         struct rb_root root;
2181         struct list_head head;
2182         struct btrfs_path *path;
2183         struct inode *inode;
2184         u64 file_pos;
2185         u64 len;
2186         u64 bytenr;
2187         u64 disk_len;
2188         u8 compress_type;
2189 };
2190
2191 static int backref_comp(struct sa_defrag_extent_backref *b1,
2192                         struct sa_defrag_extent_backref *b2)
2193 {
2194         if (b1->root_id < b2->root_id)
2195                 return -1;
2196         else if (b1->root_id > b2->root_id)
2197                 return 1;
2198
2199         if (b1->inum < b2->inum)
2200                 return -1;
2201         else if (b1->inum > b2->inum)
2202                 return 1;
2203
2204         if (b1->file_pos < b2->file_pos)
2205                 return -1;
2206         else if (b1->file_pos > b2->file_pos)
2207                 return 1;
2208
2209         /*
2210          * [------------------------------] ===> (a range of space)
2211          *     |<--->|   |<---->| =============> (fs/file tree A)
2212          * |<---------------------------->| ===> (fs/file tree B)
2213          *
2214          * A range of space can be covered by two file extents in one tree
2215          * while being covered by only one file extent in another tree.
2216          *
2217          * So we may process a disk offset more than once (two extents in A)
2218          * and land in the same extent (one extent in B), then insert two
2219          * identical backrefs (both referring to the extent in B).
2220          */
2221         return 0;
2222 }
2223
2224 static void backref_insert(struct rb_root *root,
2225                            struct sa_defrag_extent_backref *backref)
2226 {
2227         struct rb_node **p = &root->rb_node;
2228         struct rb_node *parent = NULL;
2229         struct sa_defrag_extent_backref *entry;
2230         int ret;
2231
2232         while (*p) {
2233                 parent = *p;
2234                 entry = rb_entry(parent, struct sa_defrag_extent_backref, node);
2235
2236                 ret = backref_comp(backref, entry);
2237                 if (ret < 0)
2238                         p = &(*p)->rb_left;
2239                 else
2240                         p = &(*p)->rb_right;
2241         }
2242
2243         rb_link_node(&backref->node, parent, p);
2244         rb_insert_color(&backref->node, root);
2245 }
2246
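/*
 * Note on backref_insert() above (illustrative, not part of the original
 * file): backref_comp() can return 0 for two distinct backrefs describing
 * the same (root, inode, offset) triple, as the diagram before it
 * explains.  Equal keys are sent to the right-hand subtree, so duplicates
 * are kept rather than rejected; the relink pass later drains them in
 * sorted order with rb_first()/rb_erase().
 */
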
2247 /*
2248  * Note the backref might have changed, and in that case we just return 0.
2249  */
2250 static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
2251                                        void *ctx)
2252 {
2253         struct btrfs_file_extent_item *extent;
2254         struct btrfs_fs_info *fs_info;
2255         struct old_sa_defrag_extent *old = ctx;
2256         struct new_sa_defrag_extent *new = old->new;
2257         struct btrfs_path *path = new->path;
2258         struct btrfs_key key;
2259         struct btrfs_root *root;
2260         struct sa_defrag_extent_backref *backref;
2261         struct extent_buffer *leaf;
2262         struct inode *inode = new->inode;
2263         int slot;
2264         int ret;
2265         u64 extent_offset;
2266         u64 num_bytes;
2267
2268         if (BTRFS_I(inode)->root->root_key.objectid == root_id &&
2269             inum == btrfs_ino(inode))
2270                 return 0;
2271
2272         key.objectid = root_id;
2273         key.type = BTRFS_ROOT_ITEM_KEY;
2274         key.offset = (u64)-1;
2275
2276         fs_info = BTRFS_I(inode)->root->fs_info;
2277         root = btrfs_read_fs_root_no_name(fs_info, &key);
2278         if (IS_ERR(root)) {
2279                 if (PTR_ERR(root) == -ENOENT)
2280                         return 0;
2281                 WARN_ON(1);
2282                 pr_debug("inum=%llu, offset=%llu, root_id=%llu\n",
2283                          inum, offset, root_id);
2284                 return PTR_ERR(root);
2285         }
2286
2287         key.objectid = inum;
2288         key.type = BTRFS_EXTENT_DATA_KEY;
2289         if (offset > (u64)-1 << 32)
2290                 key.offset = 0;
2291         else
2292                 key.offset = offset;
2293
2294         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2295         if (WARN_ON(ret < 0))
2296                 return ret;
2297         ret = 0;
2298
2299         while (1) {
2300                 cond_resched();
2301
2302                 leaf = path->nodes[0];
2303                 slot = path->slots[0];
2304
2305                 if (slot >= btrfs_header_nritems(leaf)) {
2306                         ret = btrfs_next_leaf(root, path);
2307                         if (ret < 0) {
2308                                 goto out;
2309                         } else if (ret > 0) {
2310                                 ret = 0;
2311                                 goto out;
2312                         }
2313                         continue;
2314                 }
2315
2316                 path->slots[0]++;
2317
2318                 btrfs_item_key_to_cpu(leaf, &key, slot);
2319
2320                 if (key.objectid > inum)
2321                         goto out;
2322
2323                 if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY)
2324                         continue;
2325
2326                 extent = btrfs_item_ptr(leaf, slot,
2327                                         struct btrfs_file_extent_item);
2328
2329                 if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
2330                         continue;
2331
2332                 /*
2333                  * 'offset' refers to the exact key.offset,
2334                  * NOT the 'offset' field in btrfs_extent_data_ref, ie.
2335                  * (key.offset - extent_offset).
2336                  */
2337                 if (key.offset != offset)
2338                         continue;
2339
2340                 extent_offset = btrfs_file_extent_offset(leaf, extent);
2341                 num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
2342
2343                 if (extent_offset >= old->extent_offset + old->offset +
2344                     old->len || extent_offset + num_bytes <=
2345                     old->extent_offset + old->offset)
2346                         continue;
2347                 break;
2348         }
2349
2350         backref = kmalloc(sizeof(*backref), GFP_NOFS);
2351         if (!backref) {
2352                 ret = -ENOMEM;
2353                 goto out;
2354         }
2355
2356         backref->root_id = root_id;
2357         backref->inum = inum;
2358         backref->file_pos = offset;
2359         backref->num_bytes = num_bytes;
2360         backref->extent_offset = extent_offset;
2361         backref->generation = btrfs_file_extent_generation(leaf, extent);
2362         backref->old = old;
2363         backref_insert(&new->root, backref);
2364         old->count++;
2365 out:
2366         btrfs_release_path(path);
2367         WARN_ON(ret);
2368         return ret;
2369 }
2370
2371 static noinline bool record_extent_backrefs(struct btrfs_path *path,
2372                                    struct new_sa_defrag_extent *new)
2373 {
2374         struct btrfs_fs_info *fs_info = BTRFS_I(new->inode)->root->fs_info;
2375         struct old_sa_defrag_extent *old, *tmp;
2376         int ret;
2377
2378         new->path = path;
2379
2380         list_for_each_entry_safe(old, tmp, &new->head, list) {
2381                 ret = iterate_inodes_from_logical(old->bytenr +
2382                                                   old->extent_offset, fs_info,
2383                                                   path, record_one_backref,
2384                                                   old);
2385                 if (ret < 0 && ret != -ENOENT)
2386                         return false;
2387
2388                 /* no backref to be processed for this extent */
2389                 if (!old->count) {
2390                         list_del(&old->list);
2391                         kfree(old);
2392                 }
2393         }
2394
2395         if (list_empty(&new->head))
2396                 return false;
2397
2398         return true;
2399 }
2400
2401 static int relink_is_mergable(struct extent_buffer *leaf,
2402                               struct btrfs_file_extent_item *fi,
2403                               struct new_sa_defrag_extent *new)
2404 {
2405         if (btrfs_file_extent_disk_bytenr(leaf, fi) != new->bytenr)
2406                 return 0;
2407
2408         if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2409                 return 0;
2410
2411         if (btrfs_file_extent_compression(leaf, fi) != new->compress_type)
2412                 return 0;
2413
2414         if (btrfs_file_extent_encryption(leaf, fi) ||
2415             btrfs_file_extent_other_encoding(leaf, fi))
2416                 return 0;
2417
2418         return 1;
2419 }
2420
2421 /*
2422  * Note the backref might have changed, and in that case we just return 0.
2423  */
2424 static noinline int relink_extent_backref(struct btrfs_path *path,
2425                                  struct sa_defrag_extent_backref *prev,
2426                                  struct sa_defrag_extent_backref *backref)
2427 {
2428         struct btrfs_file_extent_item *extent;
2429         struct btrfs_file_extent_item *item;
2430         struct btrfs_ordered_extent *ordered;
2431         struct btrfs_trans_handle *trans;
2432         struct btrfs_fs_info *fs_info;
2433         struct btrfs_root *root;
2434         struct btrfs_key key;
2435         struct extent_buffer *leaf;
2436         struct old_sa_defrag_extent *old = backref->old;
2437         struct new_sa_defrag_extent *new = old->new;
2438         struct inode *src_inode = new->inode;
2439         struct inode *inode;
2440         struct extent_state *cached = NULL;
2441         int ret = 0;
2442         u64 start;
2443         u64 len;
2444         u64 lock_start;
2445         u64 lock_end;
2446         bool merge = false;
2447         int index;
2448
2449         if (prev && prev->root_id == backref->root_id &&
2450             prev->inum == backref->inum &&
2451             prev->file_pos + prev->num_bytes == backref->file_pos)
2452                 merge = true;
2453
2454         /* step 1: get root */
2455         key.objectid = backref->root_id;
2456         key.type = BTRFS_ROOT_ITEM_KEY;
2457         key.offset = (u64)-1;
2458
2459         fs_info = BTRFS_I(src_inode)->root->fs_info;
2460         index = srcu_read_lock(&fs_info->subvol_srcu);
2461
2462         root = btrfs_read_fs_root_no_name(fs_info, &key);
2463         if (IS_ERR(root)) {
2464                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2465                 if (PTR_ERR(root) == -ENOENT)
2466                         return 0;
2467                 return PTR_ERR(root);
2468         }
2469
2470         if (btrfs_root_readonly(root)) {
2471                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2472                 return 0;
2473         }
2474
2475         /* step 2: get inode */
2476         key.objectid = backref->inum;
2477         key.type = BTRFS_INODE_ITEM_KEY;
2478         key.offset = 0;
2479
2480         inode = btrfs_iget(fs_info->sb, &key, root, NULL);
2481         if (IS_ERR(inode)) {
2482                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2483                 return 0;
2484         }
2485
2486         srcu_read_unlock(&fs_info->subvol_srcu, index);
2487
2488         /* step 3: relink backref */
2489         lock_start = backref->file_pos;
2490         lock_end = backref->file_pos + backref->num_bytes - 1;
2491         lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2492                          &cached);
2493
2494         ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
2495         if (ordered) {
2496                 btrfs_put_ordered_extent(ordered);
2497                 goto out_unlock;
2498         }
2499
2500         trans = btrfs_join_transaction(root);
2501         if (IS_ERR(trans)) {
2502                 ret = PTR_ERR(trans);
2503                 goto out_unlock;
2504         }
2505
2506         key.objectid = backref->inum;
2507         key.type = BTRFS_EXTENT_DATA_KEY;
2508         key.offset = backref->file_pos;
2509
2510         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2511         if (ret < 0) {
2512                 goto out_free_path;
2513         } else if (ret > 0) {
2514                 ret = 0;
2515                 goto out_free_path;
2516         }
2517
2518         extent = btrfs_item_ptr(path->nodes[0], path->slots[0],
2519                                 struct btrfs_file_extent_item);
2520
2521         if (btrfs_file_extent_generation(path->nodes[0], extent) !=
2522             backref->generation)
2523                 goto out_free_path;
2524
2525         btrfs_release_path(path);
2526
2527         start = backref->file_pos;
2528         if (backref->extent_offset < old->extent_offset + old->offset)
2529                 start += old->extent_offset + old->offset -
2530                          backref->extent_offset;
2531
2532         len = min(backref->extent_offset + backref->num_bytes,
2533                   old->extent_offset + old->offset + old->len);
2534         len -= max(backref->extent_offset, old->extent_offset + old->offset);
2535
2536         ret = btrfs_drop_extents(trans, root, inode, start,
2537                                  start + len, 1);
2538         if (ret)
2539                 goto out_free_path;
2540 again:
2541         key.objectid = btrfs_ino(inode);
2542         key.type = BTRFS_EXTENT_DATA_KEY;
2543         key.offset = start;
2544
2545         path->leave_spinning = 1;
2546         if (merge) {
2547                 struct btrfs_file_extent_item *fi;
2548                 u64 extent_len;
2549                 struct btrfs_key found_key;
2550
2551                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2552                 if (ret < 0)
2553                         goto out_free_path;
2554
2555                 path->slots[0]--;
2556                 leaf = path->nodes[0];
2557                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2558
2559                 fi = btrfs_item_ptr(leaf, path->slots[0],
2560                                     struct btrfs_file_extent_item);
2561                 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
2562
2563                 if (extent_len + found_key.offset == start &&
2564                     relink_is_mergable(leaf, fi, new)) {
2565                         btrfs_set_file_extent_num_bytes(leaf, fi,
2566                                                         extent_len + len);
2567                         btrfs_mark_buffer_dirty(leaf);
2568                         inode_add_bytes(inode, len);
2569
2570                         ret = 1;
2571                         goto out_free_path;
2572                 } else {
2573                         merge = false;
2574                         btrfs_release_path(path);
2575                         goto again;
2576                 }
2577         }
2578
2579         ret = btrfs_insert_empty_item(trans, root, path, &key,
2580                                         sizeof(*extent));
2581         if (ret) {
2582                 btrfs_abort_transaction(trans, root, ret);
2583                 goto out_free_path;
2584         }
2585
2586         leaf = path->nodes[0];
2587         item = btrfs_item_ptr(leaf, path->slots[0],
2588                                 struct btrfs_file_extent_item);
2589         btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr);
2590         btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len);
2591         btrfs_set_file_extent_offset(leaf, item, start - new->file_pos);
2592         btrfs_set_file_extent_num_bytes(leaf, item, len);
2593         btrfs_set_file_extent_ram_bytes(leaf, item, new->len);
2594         btrfs_set_file_extent_generation(leaf, item, trans->transid);
2595         btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
2596         btrfs_set_file_extent_compression(leaf, item, new->compress_type);
2597         btrfs_set_file_extent_encryption(leaf, item, 0);
2598         btrfs_set_file_extent_other_encoding(leaf, item, 0);
2599
2600         btrfs_mark_buffer_dirty(leaf);
2601         inode_add_bytes(inode, len);
2602         btrfs_release_path(path);
2603
2604         ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
2605                         new->disk_len, 0,
2606                         backref->root_id, backref->inum,
2607                         new->file_pos); /* start - extent_offset */
2608         if (ret) {
2609                 btrfs_abort_transaction(trans, root, ret);
2610                 goto out_free_path;
2611         }
2612
2613         ret = 1;
2614 out_free_path:
2615         btrfs_release_path(path);
2616         path->leave_spinning = 0;
2617         btrfs_end_transaction(trans, root);
2618 out_unlock:
2619         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2620                              &cached, GFP_NOFS);
2621         iput(inode);
2622         return ret;
2623 }
2624
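/*
 * Free a snapshot-aware defrag record along with every old extent entry
 * still queued on its list.
 */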
2625 static void free_sa_defrag_extent(struct new_sa_defrag_extent *new)
2626 {
2627         struct old_sa_defrag_extent *old, *tmp;
2628
2629         if (!new)
2630                 return;
2631
2632         list_for_each_entry_safe(old, tmp, &new->head, list) {
2633                 kfree(old);
2634         }
2635         kfree(new);
2636 }
2637
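/*
 * Relink the defragged extent into each file position that referenced the
 * old extents, using the backrefs recorded earlier, then drop the defrag
 * record and wake anyone waiting for defrag to finish.
 */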
2638 static void relink_file_extents(struct new_sa_defrag_extent *new)
2639 {
2640         struct btrfs_path *path;
2641         struct sa_defrag_extent_backref *backref;
2642         struct sa_defrag_extent_backref *prev = NULL;
2643         struct inode *inode;
2644         struct btrfs_root *root;
2645         struct rb_node *node;
2646         int ret;
2647
2648         inode = new->inode;
2649         root = BTRFS_I(inode)->root;
2650
2651         path = btrfs_alloc_path();
2652         if (!path)
2653                 return;
2654
2655         if (!record_extent_backrefs(path, new)) {
2656                 btrfs_free_path(path);
2657                 goto out;
2658         }
2659         btrfs_release_path(path);
2660
2661         while (1) {
2662                 node = rb_first(&new->root);
2663                 if (!node)
2664                         break;
2665                 rb_erase(node, &new->root);
2666
2667                 backref = rb_entry(node, struct sa_defrag_extent_backref, node);
2668
2669                 ret = relink_extent_backref(path, prev, backref);
2670                 WARN_ON(ret < 0);
2671
2672                 kfree(prev);
2673
2674                 if (ret == 1)
2675                         prev = backref;
2676                 else
2677                         prev = NULL;
2678                 cond_resched();
2679         }
2680         kfree(prev);
2681
2682         btrfs_free_path(path);
2683 out:
2684         free_sa_defrag_extent(new);
2685
2686         atomic_dec(&root->fs_info->defrag_running);
2687         wake_up(&root->fs_info->transaction_wait);
2688 }
2689
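/*
 * Walk the file extent items overlapping the range covered by @ordered and
 * record each old extent, so that relink_file_extents() can later restore
 * sharing for snapshot-aware defrag.  Returns NULL on any failure.
 */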
2690 static struct new_sa_defrag_extent *
2691 record_old_file_extents(struct inode *inode,
2692                         struct btrfs_ordered_extent *ordered)
2693 {
2694         struct btrfs_root *root = BTRFS_I(inode)->root;
2695         struct btrfs_path *path;
2696         struct btrfs_key key;
2697         struct old_sa_defrag_extent *old;
2698         struct new_sa_defrag_extent *new;
2699         int ret;
2700
2701         new = kmalloc(sizeof(*new), GFP_NOFS);
2702         if (!new)
2703                 return NULL;
2704
2705         new->inode = inode;
2706         new->file_pos = ordered->file_offset;
2707         new->len = ordered->len;
2708         new->bytenr = ordered->start;
2709         new->disk_len = ordered->disk_len;
2710         new->compress_type = ordered->compress_type;
2711         new->root = RB_ROOT;
2712         INIT_LIST_HEAD(&new->head);
2713
2714         path = btrfs_alloc_path();
2715         if (!path)
2716                 goto out_kfree;
2717
2718         key.objectid = btrfs_ino(inode);
2719         key.type = BTRFS_EXTENT_DATA_KEY;
2720         key.offset = new->file_pos;
2721
2722         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2723         if (ret < 0)
2724                 goto out_free_path;
2725         if (ret > 0 && path->slots[0] > 0)
2726                 path->slots[0]--;
2727
2728         /* find out all the old extents for the file range */
2729         while (1) {
2730                 struct btrfs_file_extent_item *extent;
2731                 struct extent_buffer *l;
2732                 int slot;
2733                 u64 num_bytes;
2734                 u64 offset;
2735                 u64 end;
2736                 u64 disk_bytenr;
2737                 u64 extent_offset;
2738
2739                 l = path->nodes[0];
2740                 slot = path->slots[0];
2741
2742                 if (slot >= btrfs_header_nritems(l)) {
2743                         ret = btrfs_next_leaf(root, path);
2744                         if (ret < 0)
2745                                 goto out_free_path;
2746                         else if (ret > 0)
2747                                 break;
2748                         continue;
2749                 }
2750
2751                 btrfs_item_key_to_cpu(l, &key, slot);
2752
2753                 if (key.objectid != btrfs_ino(inode))
2754                         break;
2755                 if (key.type != BTRFS_EXTENT_DATA_KEY)
2756                         break;
2757                 if (key.offset >= new->file_pos + new->len)
2758                         break;
2759
2760                 extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item);
2761
2762                 num_bytes = btrfs_file_extent_num_bytes(l, extent);
2763                 if (key.offset + num_bytes < new->file_pos)
2764                         goto next;
2765
2766                 disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent);
2767                 if (!disk_bytenr)
2768                         goto next;
2769
2770                 extent_offset = btrfs_file_extent_offset(l, extent);
2771
2772                 old = kmalloc(sizeof(*old), GFP_NOFS);
2773                 if (!old)
2774                         goto out_free_path;
2775
2776                 offset = max(new->file_pos, key.offset);
2777                 end = min(new->file_pos + new->len, key.offset + num_bytes);
2778
2779                 old->bytenr = disk_bytenr;
2780                 old->extent_offset = extent_offset;
2781                 old->offset = offset - key.offset;
2782                 old->len = end - offset;
2783                 old->new = new;
2784                 old->count = 0;
2785                 list_add_tail(&old->list, &new->head);
2786 next:
2787                 path->slots[0]++;
2788                 cond_resched();
2789         }
2790
2791         btrfs_free_path(path);
2792         atomic_inc(&root->fs_info->defrag_running);
2793
2794         return new;
2795
2796 out_free_path:
2797         btrfs_free_path(path);
2798 out_kfree:
2799         free_sa_defrag_extent(new);
2800         return NULL;
2801 }
2802
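/*
 * Decrease the delalloc byte count of the block group that contains
 * @start, now that the ordered extent covering it has completed.
 */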
2803 static void btrfs_release_delalloc_bytes(struct btrfs_root *root,
2804                                          u64 start, u64 len)
2805 {
2806         struct btrfs_block_group_cache *cache;
2807
2808         cache = btrfs_lookup_block_group(root->fs_info, start);
2809         ASSERT(cache);
2810
2811         spin_lock(&cache->lock);
2812         cache->delalloc_bytes -= len;
2813         spin_unlock(&cache->lock);
2814
2815         btrfs_put_block_group(cache);
2816 }
2817
2818 /* as ordered data IO finishes, this gets called so we can finish
2819  * an ordered extent once the range of bytes in the file it covers
2820  * has been fully written.
2821  */
2822 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
2823 {
2824         struct inode *inode = ordered_extent->inode;
2825         struct btrfs_root *root = BTRFS_I(inode)->root;
2826         struct btrfs_trans_handle *trans = NULL;
2827         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2828         struct extent_state *cached_state = NULL;
2829         struct new_sa_defrag_extent *new = NULL;
2830         int compress_type = 0;
2831         int ret = 0;
2832         u64 logical_len = ordered_extent->len;
2833         bool nolock;
2834         bool truncated = false;
2835
2836         nolock = btrfs_is_free_space_inode(inode);
2837
2838         if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
2839                 ret = -EIO;
2840                 goto out;
2841         }
2842
2843         btrfs_free_io_failure_record(inode, ordered_extent->file_offset,
2844                                      ordered_extent->file_offset +
2845                                      ordered_extent->len - 1);
2846
2847         if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
2848                 truncated = true;
2849                 logical_len = ordered_extent->truncated_len;
2850                 /* Truncated the entire extent, don't bother adding */
2851                 if (!logical_len)
2852                         goto out;
2853         }
2854
2855         if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
2856                 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
2857
2858                 /*
2859                  * For the mwrite (mmap + memset to write) case, we still
2860                  * reserve space for the NOCOW range.  Since NOCOW won't
2861                  * create a new delayed ref, just free the reserved space.
2862                  */
2863                 btrfs_qgroup_free_data(inode, ordered_extent->file_offset,
2864                                        ordered_extent->len);
2865                 btrfs_ordered_update_i_size(inode, 0, ordered_extent);
2866                 if (nolock)
2867                         trans = btrfs_join_transaction_nolock(root);
2868                 else
2869                         trans = btrfs_join_transaction(root);
2870                 if (IS_ERR(trans)) {
2871                         ret = PTR_ERR(trans);
2872                         trans = NULL;
2873                         goto out;
2874                 }
2875                 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
2876                 ret = btrfs_update_inode_fallback(trans, root, inode);
2877                 if (ret) /* -ENOMEM or corruption */
2878                         btrfs_abort_transaction(trans, root, ret);
2879                 goto out;
2880         }
2881
2882         lock_extent_bits(io_tree, ordered_extent->file_offset,
2883                          ordered_extent->file_offset + ordered_extent->len - 1,
2884                          &cached_state);
2885
2886         ret = test_range_bit(io_tree, ordered_extent->file_offset,
2887                         ordered_extent->file_offset + ordered_extent->len - 1,
2888                         EXTENT_DEFRAG, 1, cached_state);
2889         if (ret) {
2890                 u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
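                /*
                 * Note: the "0 &&" below short-circuits this check, leaving
                 * snapshot-aware defrag effectively disabled, so
                 * record_old_file_extents() is unreachable from this path.
                 */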
2891                 if (0 && last_snapshot >= BTRFS_I(inode)->generation)
2892                         /* the inode is shared */
2893                         new = record_old_file_extents(inode, ordered_extent);
2894
2895                 clear_extent_bit(io_tree, ordered_extent->file_offset,
2896                         ordered_extent->file_offset + ordered_extent->len - 1,
2897                         EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS);
2898         }
2899
2900         if (nolock)
2901                 trans = btrfs_join_transaction_nolock(root);
2902         else
2903                 trans = btrfs_join_transaction(root);
2904         if (IS_ERR(trans)) {
2905                 ret = PTR_ERR(trans);
2906                 trans = NULL;
2907                 goto out_unlock;
2908         }
2909
2910         trans->block_rsv = &root->fs_info->delalloc_block_rsv;
2911
2912         if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
2913                 compress_type = ordered_extent->compress_type;
2914         if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
2915                 BUG_ON(compress_type);
2916                 ret = btrfs_mark_extent_written(trans, inode,
2917                                                 ordered_extent->file_offset,
2918                                                 ordered_extent->file_offset +
2919                                                 logical_len);
2920         } else {
2921                 BUG_ON(root == root->fs_info->tree_root);
2922                 ret = insert_reserved_file_extent(trans, inode,
2923                                                 ordered_extent->file_offset,
2924                                                 ordered_extent->start,
2925                                                 ordered_extent->disk_len,
2926                                                 logical_len, logical_len,
2927                                                 compress_type, 0, 0,
2928                                                 BTRFS_FILE_EXTENT_REG);
2929                 if (!ret)
2930                         btrfs_release_delalloc_bytes(root,
2931                                                      ordered_extent->start,
2932                                                      ordered_extent->disk_len);
2933         }
2934         unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
2935                            ordered_extent->file_offset, ordered_extent->len,
2936                            trans->transid);
2937         if (ret < 0) {
2938                 btrfs_abort_transaction(trans, root, ret);
2939                 goto out_unlock;
2940         }
2941
2942         add_pending_csums(trans, inode, ordered_extent->file_offset,
2943                           &ordered_extent->list);
2944
2945         btrfs_ordered_update_i_size(inode, 0, ordered_extent);
2946         ret = btrfs_update_inode_fallback(trans, root, inode);
2947         if (ret) { /* -ENOMEM or corruption */
2948                 btrfs_abort_transaction(trans, root, ret);
2949                 goto out_unlock;
2950         }
2951         ret = 0;
2952 out_unlock:
2953         unlock_extent_cached(io_tree, ordered_extent->file_offset,
2954                              ordered_extent->file_offset +
2955                              ordered_extent->len - 1, &cached_state, GFP_NOFS);
2956 out:
2957         if (root != root->fs_info->tree_root)
2958                 btrfs_delalloc_release_metadata(inode, ordered_extent->len);
2959         if (trans)
2960                 btrfs_end_transaction(trans, root);
2961
2962         if (ret || truncated) {
2963                 u64 start, end;
2964
2965                 if (truncated)
2966                         start = ordered_extent->file_offset + logical_len;
2967                 else
2968                         start = ordered_extent->file_offset;
2969                 end = ordered_extent->file_offset + ordered_extent->len - 1;
2970                 clear_extent_uptodate(io_tree, start, end, NULL, GFP_NOFS);
2971
2972                 /* Drop the cache for the part of the extent we didn't write. */
2973                 btrfs_drop_extent_cache(inode, start, end, 0);
2974
2975                 /*
2976                  * If the ordered extent had an IOERR or something else went
2977                  * wrong we need to return the space for this ordered extent
2978                  * back to the allocator.  We only free the extent in the
2979                  * truncated case if we didn't write out the extent at all.
2980                  */
2981                 if ((ret || !logical_len) &&
2982                     !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
2983                     !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
2984                         btrfs_free_reserved_extent(root, ordered_extent->start,
2985                                                    ordered_extent->disk_len, 1);
2986         }
2987
2988
2989         /*
2990          * This needs to be done to make sure anybody waiting knows we are done
2991          * updating everything for this ordered extent.
2992          */
2993         btrfs_remove_ordered_extent(inode, ordered_extent);
2994
2995         /* for snapshot-aware defrag */
2996         if (new) {
2997                 if (ret) {
2998                         free_sa_defrag_extent(new);
2999                         atomic_dec(&root->fs_info->defrag_running);
3000                 } else {
3001                         relink_file_extents(new);
3002                 }
3003         }
3004
3005         /* once for us */
3006         btrfs_put_ordered_extent(ordered_extent);
3007         /* once for the tree */
3008         btrfs_put_ordered_extent(ordered_extent);
3009
3010         return ret;
3011 }
3012
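/* Work item entry point: complete the ordered extent embedded in @work. */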
3013 static void finish_ordered_fn(struct btrfs_work *work)
3014 {
3015         struct btrfs_ordered_extent *ordered_extent;
3016         ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
3017         btrfs_finish_ordered_io(ordered_extent);
3018 }
3019
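/*
 * Writeback end-io hook.  Once the last pending bytes of an ordered extent
 * are accounted for, queue it on the appropriate workqueue so that
 * btrfs_finish_ordered_io() runs in process context.
 */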
3020 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
3021                                 struct extent_state *state, int uptodate)
3022 {
3023         struct inode *inode = page->mapping->host;
3024         struct btrfs_root *root = BTRFS_I(inode)->root;
3025         struct btrfs_ordered_extent *ordered_extent = NULL;
3026         struct btrfs_workqueue *wq;
3027         btrfs_work_func_t func;
3028
3029         trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
3030
3031         ClearPagePrivate2(page);
3032         if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
3033                                             end - start + 1, uptodate))
3034                 return 0;
3035
3036         if (btrfs_is_free_space_inode(inode)) {
3037                 wq = root->fs_info->endio_freespace_worker;
3038                 func = btrfs_freespace_write_helper;
3039         } else {
3040                 wq = root->fs_info->endio_write_workers;
3041                 func = btrfs_endio_write_helper;
3042         }
3043
3044         btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL,
3045                         NULL);
3046         btrfs_queue_work(wq, &ordered_extent->work);
3047
3048         return 0;
3049 }
3050
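/*
 * Verify the data checksum of one block of a read bio.  On a mismatch the
 * page range is overwritten (with 0x01 bytes) so stale data is never
 * exposed, and -EIO is returned unless the expected checksum was 0.
 */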
3051 static int __readpage_endio_check(struct inode *inode,
3052                                   struct btrfs_io_bio *io_bio,
3053                                   int icsum, struct page *page,
3054                                   int pgoff, u64 start, size_t len)
3055 {
3056         char *kaddr;
3057         u32 csum_expected;
3058         u32 csum = ~(u32)0;
3059
3060         csum_expected = *(((u32 *)io_bio->csum) + icsum);
3061
3062         kaddr = kmap_atomic(page);
3063         csum = btrfs_csum_data(kaddr + pgoff, csum, len);
3064         btrfs_csum_final(csum, (char *)&csum);
3065         if (csum != csum_expected)
3066                 goto zeroit;
3067
3068         kunmap_atomic(kaddr);
3069         return 0;
3070 zeroit:
3071         btrfs_warn_rl(BTRFS_I(inode)->root->fs_info,
3072                 "csum failed ino %llu off %llu csum %u expected csum %u",
3073                            btrfs_ino(inode), start, csum, csum_expected);
3074         memset(kaddr + pgoff, 1, len);
3075         flush_dcache_page(page);
3076         kunmap_atomic(kaddr);
3077         if (csum_expected == 0)
3078                 return 0;
3079         return -EIO;
3080 }
3081
3082 /*
3083  * when reads are done, we need to check csums to verify the data is correct
3084  * if there's a match, we allow the bio to finish.  If not, the code in
3085  * extent_io.c will try to find good copies for us.
3086  */
3087 static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
3088                                       u64 phy_offset, struct page *page,
3089                                       u64 start, u64 end, int mirror)
3090 {
3091         size_t offset = start - page_offset(page);
3092         struct inode *inode = page->mapping->host;
3093         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3094         struct btrfs_root *root = BTRFS_I(inode)->root;
3095
3096         if (PageChecked(page)) {
3097                 ClearPageChecked(page);
3098                 return 0;
3099         }
3100
3101         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
3102                 return 0;
3103
3104         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
3105             test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
3106                 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
3107                                   GFP_NOFS);
3108                 return 0;
3109         }
3110
3111         phy_offset >>= inode->i_sb->s_blocksize_bits;
3112         return __readpage_endio_check(inode, io_bio, phy_offset, page, offset,
3113                                       start, (size_t)(end - start + 1));
3114 }
3115
3116 struct delayed_iput {
3117         struct list_head list;
3118         struct inode *inode;
3119 };
3120
3121 /* JDM: If this is fs-wide, why can't we add a pointer to
3122  * btrfs_inode instead and avoid the allocation? */
3123 void btrfs_add_delayed_iput(struct inode *inode)
3124 {
3125         struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
3126         struct delayed_iput *delayed;
3127
3128         if (atomic_add_unless(&inode->i_count, -1, 1))
3129                 return;
3130
3131         delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
3132         delayed->inode = inode;
3133
3134         spin_lock(&fs_info->delayed_iput_lock);
3135         list_add_tail(&delayed->list, &fs_info->delayed_iputs);
3136         spin_unlock(&fs_info->delayed_iput_lock);
3137 }
3138
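/*
 * Run every iput deferred through btrfs_add_delayed_iput(): splice the
 * fs-wide list out under the spinlock, then drop the inode references
 * outside of it.
 */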
3139 void btrfs_run_delayed_iputs(struct btrfs_root *root)
3140 {
3141         LIST_HEAD(list);
3142         struct btrfs_fs_info *fs_info = root->fs_info;
3143         struct delayed_iput *delayed;
3144         int empty;
3145
3146         spin_lock(&fs_info->delayed_iput_lock);
3147         empty = list_empty(&fs_info->delayed_iputs);
3148         spin_unlock(&fs_info->delayed_iput_lock);
3149         if (empty)
3150                 return;
3151
3152         down_read(&fs_info->delayed_iput_sem);
3153
3154         spin_lock(&fs_info->delayed_iput_lock);
3155         list_splice_init(&fs_info->delayed_iputs, &list);
3156         spin_unlock(&fs_info->delayed_iput_lock);
3157
3158         while (!list_empty(&list)) {
3159                 delayed = list_entry(list.next, struct delayed_iput, list);
3160                 list_del(&delayed->list);
3161                 iput(delayed->inode);
3162                 kfree(delayed);
3163         }
3164
3165         up_read(&root->fs_info->delayed_iput_sem);
3166 }
3167
3168 /*
3169  * This is called at transaction commit time.  If there are no orphan
3170  * files left in the subvolume, it removes the orphan item and frees
3171  * the block_rsv structure.
3172  */
3173 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
3174                               struct btrfs_root *root)
3175 {
3176         struct btrfs_block_rsv *block_rsv;
3177         int ret;
3178
3179         if (atomic_read(&root->orphan_inodes) ||
3180             root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
3181                 return;
3182
3183         spin_lock(&root->orphan_lock);
3184         if (atomic_read(&root->orphan_inodes)) {
3185                 spin_unlock(&root->orphan_lock);
3186                 return;
3187         }
3188
3189         if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) {
3190                 spin_unlock(&root->orphan_lock);
3191                 return;
3192         }
3193
3194         block_rsv = root->orphan_block_rsv;
3195         root->orphan_block_rsv = NULL;
3196         spin_unlock(&root->orphan_lock);
3197
3198         if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state) &&
3199             btrfs_root_refs(&root->root_item) > 0) {
3200                 ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
3201                                             root->root_key.objectid);
3202                 if (ret)
3203                         btrfs_abort_transaction(trans, root, ret);
3204                 else
3205                         clear_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
3206                                   &root->state);
3207         }
3208
3209         if (block_rsv) {
3210                 WARN_ON(block_rsv->size > 0);
3211                 btrfs_free_block_rsv(root, block_rsv);
3212         }
3213 }
3214
3215 /*
3216  * This creates an orphan entry for the given inode in case something goes
3217  * wrong in the middle of an unlink/truncate.
3218  *
3219  * NOTE: caller of this function should reserve 5 units of metadata for
3220  *       this function.
3221  */
3222 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
3223 {
3224         struct btrfs_root *root = BTRFS_I(inode)->root;
3225         struct btrfs_block_rsv *block_rsv = NULL;
3226         int reserve = 0;
3227         int insert = 0;
3228         int ret;
3229
3230         if (!root->orphan_block_rsv) {
3231                 block_rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
3232                 if (!block_rsv)
3233                         return -ENOMEM;
3234         }
3235
3236         spin_lock(&root->orphan_lock);
3237         if (!root->orphan_block_rsv) {
3238                 root->orphan_block_rsv = block_rsv;
3239         } else if (block_rsv) {
3240                 btrfs_free_block_rsv(root, block_rsv);
3241                 block_rsv = NULL;
3242         }
3243
3244         if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3245                               &BTRFS_I(inode)->runtime_flags)) {
3246 #if 0
3247                 /*
3248                  * For proper ENOSPC handling, we should do orphan
3249                  * cleanup when mounting. But this introduces a backward
3250                  * compatibility issue.
3251                  */
3252                 if (!xchg(&root->orphan_item_inserted, 1))
3253                         insert = 2;
3254                 else
3255                         insert = 1;
3256 #endif
3257                 insert = 1;
3258                 atomic_inc(&root->orphan_inodes);
3259         }
3260
3261         if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3262                               &BTRFS_I(inode)->runtime_flags))
3263                 reserve = 1;
3264         spin_unlock(&root->orphan_lock);
3265
3266         /* grab metadata reservation from transaction handle */
3267         if (reserve) {
3268                 ret = btrfs_orphan_reserve_metadata(trans, inode);
3269                 BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
3270         }
3271
3272         /* insert an orphan item to track this unlinked/truncated file */
3273         if (insert >= 1) {
3274                 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
3275                 if (ret) {
3276                         atomic_dec(&root->orphan_inodes);
3277                         if (reserve) {
3278                                 clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3279                                           &BTRFS_I(inode)->runtime_flags);
3280                                 btrfs_orphan_release_metadata(inode);
3281                         }
3282                         if (ret != -EEXIST) {
3283                                 clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3284                                           &BTRFS_I(inode)->runtime_flags);
3285                                 btrfs_abort_transaction(trans, root, ret);
3286                                 return ret;
3287                         }
3288                 }
3289                 ret = 0;
3290         }
3291
3292         /* insert an orphan item to track that the subvolume contains orphan files */
3293         if (insert >= 2) {
3294                 ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
3295                                                root->root_key.objectid);
3296                 if (ret && ret != -EEXIST) {
3297                         btrfs_abort_transaction(trans, root, ret);
3298                         return ret;
3299                 }
3300         }
3301         return 0;
3302 }
3303
3304 /*
3305  * We have done the truncate/delete so we can go ahead and remove the orphan
3306  * item for this particular inode.
3307  */
3308 static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3309                             struct inode *inode)
3310 {
3311         struct btrfs_root *root = BTRFS_I(inode)->root;
3312         int delete_item = 0;
3313         int release_rsv = 0;
3314         int ret = 0;
3315
3316         spin_lock(&root->orphan_lock);
3317         if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3318                                &BTRFS_I(inode)->runtime_flags))
3319                 delete_item = 1;
3320
3321         if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3322                                &BTRFS_I(inode)->runtime_flags))
3323                 release_rsv = 1;
3324         spin_unlock(&root->orphan_lock);
3325
3326         if (delete_item) {
3327                 atomic_dec(&root->orphan_inodes);
3328                 if (trans)
3329                         ret = btrfs_del_orphan_item(trans, root,
3330                                                     btrfs_ino(inode));
3331         }
3332
3333         if (release_rsv)
3334                 btrfs_orphan_release_metadata(inode);
3335
3336         return ret;
3337 }
3338
3339 /*
3340  * this cleans up any orphans that may be left on the list from the last use
3341  * of this root.
3342  */
3343 int btrfs_orphan_cleanup(struct btrfs_root *root)
3344 {
3345         struct btrfs_path *path;
3346         struct extent_buffer *leaf;
3347         struct btrfs_key key, found_key;
3348         struct btrfs_trans_handle *trans;
3349         struct inode *inode;
3350         u64 last_objectid = 0;
3351         int ret = 0, nr_unlink = 0, nr_truncate = 0;
3352
3353         if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
3354                 return 0;
3355
3356         path = btrfs_alloc_path();
3357         if (!path) {
3358                 ret = -ENOMEM;
3359                 goto out;
3360         }
3361         path->reada = -1;
3362
3363         key.objectid = BTRFS_ORPHAN_OBJECTID;
3364         key.type = BTRFS_ORPHAN_ITEM_KEY;
3365         key.offset = (u64)-1;
3366
3367         while (1) {
3368                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3369                 if (ret < 0)
3370                         goto out;
3371
3372                 /*
3373                  * ret == 0 means we found what we were searching for, which
3374                  * is weird, but possible; so only screw with the path if we
3375                  * didn't find the key, and check whether what's there matches
3376                  */
3377                 if (ret > 0) {
3378                         ret = 0;
3379                         if (path->slots[0] == 0)
3380                                 break;
3381                         path->slots[0]--;
3382                 }
3383
3384                 /* pull out the item */
3385                 leaf = path->nodes[0];
3386                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3387
3388                 /* make sure the item matches what we want */
3389                 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3390                         break;
3391                 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
3392                         break;
3393
3394                 /* release the path since we're done with it */
3395                 btrfs_release_path(path);
3396
3397                 /*
3398                  * this is where we basically do a btrfs_lookup, without the
3399                  * crossing-root part.  we store the inode number in the
3400                  * offset of the orphan item.
3401                  */
3402
3403                 if (found_key.offset == last_objectid) {
3404                         btrfs_err(root->fs_info,
3405                                 "Error removing orphan entry, stopping orphan cleanup");
3406                         ret = -EINVAL;
3407                         goto out;
3408                 }
3409
3410                 last_objectid = found_key.offset;
3411
3412                 found_key.objectid = found_key.offset;
3413                 found_key.type = BTRFS_INODE_ITEM_KEY;
3414                 found_key.offset = 0;
3415                 inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
3416                 ret = PTR_ERR_OR_ZERO(inode);
3417                 if (ret && ret != -ESTALE)
3418                         goto out;
3419
3420                 if (ret == -ESTALE && root == root->fs_info->tree_root) {
3421                         struct btrfs_root *dead_root;
3422                         struct btrfs_fs_info *fs_info = root->fs_info;
3423                         int is_dead_root = 0;
3424
3425                         /*
3426                          * this is an orphan in the tree root. Currently these
3427                          * could come from 2 sources:
3428                          *  a) a snapshot deletion in progress
3429                          *  b) a free space cache inode
3430                          * We need to distinguish those two, as the snapshot
3431                          * orphan must not get deleted.
3432                          * find_dead_roots already ran before us, so if this
3433                          * is a snapshot deletion, we should find the root
3434                          * in the dead_roots list
3435                          */
3436                         spin_lock(&fs_info->trans_lock);
3437                         list_for_each_entry(dead_root, &fs_info->dead_roots,
3438                                             root_list) {
3439                                 if (dead_root->root_key.objectid ==
3440                                     found_key.objectid) {
3441                                         is_dead_root = 1;
3442                                         break;
3443                                 }
3444                         }
3445                         spin_unlock(&fs_info->trans_lock);
3446                         if (is_dead_root) {
3447                                 /* prevent this orphan from being found again */
3448                                 key.offset = found_key.objectid - 1;
3449                                 continue;
3450                         }
3451                 }
3452                 /*
3453                  * Inode is already gone but the orphan item is still there,
3454                  * kill the orphan item.
3455                  */
3456                 if (ret == -ESTALE) {
3457                         trans = btrfs_start_transaction(root, 1);
3458                         if (IS_ERR(trans)) {
3459                                 ret = PTR_ERR(trans);
3460                                 goto out;
3461                         }
3462                         btrfs_debug(root->fs_info, "auto deleting %Lu",
3463                                 found_key.objectid);
3464                         ret = btrfs_del_orphan_item(trans, root,
3465                                                     found_key.objectid);
3466                         btrfs_end_transaction(trans, root);
3467                         if (ret)
3468                                 goto out;
3469                         continue;
3470                 }
3471
3472                 /*
3473                  * add this inode to the orphan list so btrfs_orphan_del does
3474                  * the proper thing when we hit it
3475                  */
3476                 set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3477                         &BTRFS_I(inode)->runtime_flags);
3478                 atomic_inc(&root->orphan_inodes);
3479
3480                 /* if we have links, this was a truncate, let's do that */
3481                 if (inode->i_nlink) {
3482                         if (WARN_ON(!S_ISREG(inode->i_mode))) {
3483                                 iput(inode);
3484                                 continue;
3485                         }
3486                         nr_truncate++;
3487
3488                         /* 1 for the orphan item deletion. */
3489                         trans = btrfs_start_transaction(root, 1);
3490                         if (IS_ERR(trans)) {
3491                                 iput(inode);
3492                                 ret = PTR_ERR(trans);
3493                                 goto out;
3494                         }
3495                         ret = btrfs_orphan_add(trans, inode);
3496                         btrfs_end_transaction(trans, root);
3497                         if (ret) {
3498                                 iput(inode);
3499                                 goto out;
3500                         }
3501
3502                         ret = btrfs_truncate(inode);
3503                         if (ret)
3504                                 btrfs_orphan_del(NULL, inode);
3505                 } else {
3506                         nr_unlink++;
3507                 }
3508
3509                 /* this will do delete_inode and everything for us */
3510                 iput(inode);
3511                 if (ret)
3512                         goto out;
3513         }
3514         /* release the path since we're done with it */
3515         btrfs_release_path(path);
3516
3517         root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
3518
3519         if (root->orphan_block_rsv)
3520                 btrfs_block_rsv_release(root, root->orphan_block_rsv,
3521                                         (u64)-1);
3522
3523         if (root->orphan_block_rsv ||
3524             test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
3525                 trans = btrfs_join_transaction(root);
3526                 if (!IS_ERR(trans))
3527                         btrfs_end_transaction(trans, root);
3528         }
3529
3530         if (nr_unlink)
3531                 btrfs_debug(root->fs_info, "unlinked %d orphans", nr_unlink);
3532         if (nr_truncate)
3533                 btrfs_debug(root->fs_info, "truncated %d orphans", nr_truncate);
3534
3535 out:
3536         if (ret)
3537                 btrfs_err(root->fs_info,
3538                         "could not do orphan cleanup %d", ret);
3539         btrfs_free_path(path);
3540         return ret;
3541 }
3542
3543 /*
3544  * very simple check to peek ahead in the leaf looking for xattrs.  If we
3545  * don't find any xattrs, we know there can't be any acls.
3546  *
3547  * slot is the slot the inode is in, objectid is the objectid of the inode
3548  */
3549 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3550                                           int slot, u64 objectid,
3551                                           int *first_xattr_slot)
3552 {
3553         u32 nritems = btrfs_header_nritems(leaf);
3554         struct btrfs_key found_key;
3555         static u64 xattr_access = 0;
3556         static u64 xattr_default = 0;
3557         int scanned = 0;
3558
3559         if (!xattr_access) {
3560                 xattr_access = btrfs_name_hash(POSIX_ACL_XATTR_ACCESS,
3561                                         strlen(POSIX_ACL_XATTR_ACCESS));
3562                 xattr_default = btrfs_name_hash(POSIX_ACL_XATTR_DEFAULT,
3563                                         strlen(POSIX_ACL_XATTR_DEFAULT));
3564         }
3565
3566         slot++;
3567         *first_xattr_slot = -1;
3568         while (slot < nritems) {
3569                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3570
3571                 /* we found a different objectid, there must not be acls */
3572                 if (found_key.objectid != objectid)
3573                         return 0;
3574
3575                 /* we found an xattr, assume we've got an acl */
3576                 if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
3577                         if (*first_xattr_slot == -1)
3578                                 *first_xattr_slot = slot;
3579                         if (found_key.offset == xattr_access ||
3580                             found_key.offset == xattr_default)
3581                                 return 1;
3582                 }
3583
3584                 /*
3585                  * we found a key greater than an xattr key, there can't
3586                  * be any acls later on
3587                  */
3588                 if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3589                         return 0;
3590
3591                 slot++;
3592                 scanned++;
3593
3594                 /*
3595                  * it goes inode, inode backrefs, xattrs, extents,
3596                  * so if there are a ton of hard links to an inode there can
3597                  * be a lot of backrefs.  Don't waste time searching too hard,
3598                  * this is just an optimization
3599                  */
3600                 if (scanned >= 8)
3601                         break;
3602         }
3603         /* we hit the end of the leaf before we found an xattr or
3604          * something larger than an xattr.  We have to assume the inode
3605          * has acls
3606          */
3607         if (*first_xattr_slot == -1)
3608                 *first_xattr_slot = slot;
3609         return 1;
3610 }
3611
3612 /*
3613  * read an inode from the btree into the in-memory inode
3614  */
3615 static void btrfs_read_locked_inode(struct inode *inode)
3616 {
3617         struct btrfs_path *path;
3618         struct extent_buffer *leaf;
3619         struct btrfs_inode_item *inode_item;
3620         struct btrfs_root *root = BTRFS_I(inode)->root;
3621         struct btrfs_key location;
3622         unsigned long ptr;
3623         int maybe_acls;
3624         u32 rdev;
3625         int ret;
3626         bool filled = false;
3627         int first_xattr_slot;
3628
3629         ret = btrfs_fill_inode(inode, &rdev);
3630         if (!ret)
3631                 filled = true;
3632
3633         path = btrfs_alloc_path();
3634         if (!path)
3635                 goto make_bad;
3636
3637         memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
3638
3639         ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
3640         if (ret)
3641                 goto make_bad;
3642
3643         leaf = path->nodes[0];
3644
3645         if (filled)
3646                 goto cache_index;
3647
3648         inode_item = btrfs_item_ptr(leaf, path->slots[0],
3649                                     struct btrfs_inode_item);
3650         inode->i_mode = btrfs_inode_mode(leaf, inode_item);
3651         set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
3652         i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
3653         i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
3654         btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
3655
3656         inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
3657         inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);
3658
3659         inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
3660         inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);
3661
3662         inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime);
3663         inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);
3664
3665         BTRFS_I(inode)->i_otime.tv_sec =
3666                 btrfs_timespec_sec(leaf, &inode_item->otime);
3667         BTRFS_I(inode)->i_otime.tv_nsec =
3668                 btrfs_timespec_nsec(leaf, &inode_item->otime);
3669
3670         inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
3671         BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
3672         BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3673
3674         inode->i_version = btrfs_inode_sequence(leaf, inode_item);
3675         inode->i_generation = BTRFS_I(inode)->generation;
3676         inode->i_rdev = 0;
3677         rdev = btrfs_inode_rdev(leaf, inode_item);
3678
3679         BTRFS_I(inode)->index_cnt = (u64)-1;
3680         BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
3681
3682 cache_index:
3683         /*
3684          * If we were modified in the current generation and evicted from memory
3685          * and then re-read we need to do a full sync since we don't have any
3686          * idea about which extents were modified before we were evicted from
3687          * cache.
3688          *
3689          * This is required for both inode re-read from disk and delayed inode
3690          * in delayed_nodes_tree.
3691          */
3692         if (BTRFS_I(inode)->last_trans == root->fs_info->generation)
3693                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3694                         &BTRFS_I(inode)->runtime_flags);
3695
3696         /*
3697          * We don't persist the id of the transaction where an unlink operation
3698          * against the inode was last made. So here we assume the inode might
3699          * have been evicted, and therefore the exact value of last_unlink_trans
3700          * lost, and set it to last_trans to avoid metadata inconsistencies
3701          * was lost, and set it to last_trans to avoid metadata inconsistencies
3702          * replayed. For example, in the scenario:
3703          *
3704          * touch mydir/foo
3705          * ln mydir/foo mydir/bar
3706          * sync
3707          * unlink mydir/bar
3708          * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
3709          * xfs_io -c fsync mydir/foo
3710          * <power failure>
3711          * mount fs, triggers fsync log replay
3712          *
3713          * We must make sure that when we fsync our inode foo we also log its
3714          * parent inode, otherwise after log replay the parent still has the
3715          * dentry with the "bar" name but our inode foo has a link count of 1
3716          * and doesn't have an inode ref with the name "bar" anymore.
3717          *
3718          * Setting last_unlink_trans to last_trans is a pessimistic approach,
3719          * but it guarantees correctness at the expense of occasional full
3720          * transaction commits on fsync if our inode is a directory, or if our
3721          * inode is not a directory, logging its parent unnecessarily.
3722          */
3723         BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
3724
3725         path->slots[0]++;
3726         if (inode->i_nlink != 1 ||
3727             path->slots[0] >= btrfs_header_nritems(leaf))
3728                 goto cache_acl;
3729
3730         btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
3731         if (location.objectid != btrfs_ino(inode))
3732                 goto cache_acl;
3733
3734         ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3735         if (location.type == BTRFS_INODE_REF_KEY) {
3736                 struct btrfs_inode_ref *ref;
3737
3738                 ref = (struct btrfs_inode_ref *)ptr;
3739                 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
3740         } else if (location.type == BTRFS_INODE_EXTREF_KEY) {
3741                 struct btrfs_inode_extref *extref;
3742
3743                 extref = (struct btrfs_inode_extref *)ptr;
3744                 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
3745                                                                      extref);
3746         }
3747 cache_acl:
3748         /*
3749          * try to precache a NULL acl entry for files that don't have
3750          * any xattrs or acls
3751          */
3752         maybe_acls = acls_after_inode_item(leaf, path->slots[0],
3753                                            btrfs_ino(inode), &first_xattr_slot);
3754         if (first_xattr_slot != -1) {
3755                 path->slots[0] = first_xattr_slot;
3756                 ret = btrfs_load_inode_props(inode, path);
3757                 if (ret)
3758                         btrfs_err(root->fs_info,
3759                                   "error loading props for ino %llu (root %llu): %d",
3760                                   btrfs_ino(inode),
3761                                   root->root_key.objectid, ret);
3762         }
3763         btrfs_free_path(path);
3764
3765         if (!maybe_acls)
3766                 cache_no_acl(inode);
3767
3768         switch (inode->i_mode & S_IFMT) {
3769         case S_IFREG:
3770                 inode->i_mapping->a_ops = &btrfs_aops;
3771                 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3772                 inode->i_fop = &btrfs_file_operations;
3773                 inode->i_op = &btrfs_file_inode_operations;
3774                 break;
3775         case S_IFDIR:
3776                 inode->i_fop = &btrfs_dir_file_operations;
3777                 if (root == root->fs_info->tree_root)
3778                         inode->i_op = &btrfs_dir_ro_inode_operations;
3779                 else
3780                         inode->i_op = &btrfs_dir_inode_operations;
3781                 break;
3782         case S_IFLNK:
3783                 inode->i_op = &btrfs_symlink_inode_operations;
3784                 inode->i_mapping->a_ops = &btrfs_symlink_aops;
3785                 break;
3786         default:
3787                 inode->i_op = &btrfs_special_inode_operations;
3788                 init_special_inode(inode, inode->i_mode, rdev);
3789                 break;
3790         }
3791
3792         btrfs_update_iflags(inode);
3793         return;
3794
3795 make_bad:
3796         btrfs_free_path(path);
3797         make_bad_inode(inode);
3798 }
3799
3800 /*
3801  * given a leaf and an inode, copy the inode fields into the leaf
3802  */
3803 static void fill_inode_item(struct btrfs_trans_handle *trans,
3804                             struct extent_buffer *leaf,
3805                             struct btrfs_inode_item *item,
3806                             struct inode *inode)
3807 {
3808         struct btrfs_map_token token;
3809
3810         btrfs_init_map_token(&token);
3811
3812         btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3813         btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3814         btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size,
3815                                    &token);
3816         btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3817         btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3818
3819         btrfs_set_token_timespec_sec(leaf, &item->atime,
3820                                      inode->i_atime.tv_sec, &token);
3821         btrfs_set_token_timespec_nsec(leaf, &item->atime,
3822                                       inode->i_atime.tv_nsec, &token);
3823
3824         btrfs_set_token_timespec_sec(leaf, &item->mtime,
3825                                      inode->i_mtime.tv_sec, &token);
3826         btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3827                                       inode->i_mtime.tv_nsec, &token);
3828
3829         btrfs_set_token_timespec_sec(leaf, &item->ctime,
3830                                      inode->i_ctime.tv_sec, &token);
3831         btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3832                                       inode->i_ctime.tv_nsec, &token);
3833
3834         btrfs_set_token_timespec_sec(leaf, &item->otime,
3835                                      BTRFS_I(inode)->i_otime.tv_sec, &token);
3836         btrfs_set_token_timespec_nsec(leaf, &item->otime,
3837                                       BTRFS_I(inode)->i_otime.tv_nsec, &token);
3838
3839         btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3840                                      &token);
3841         btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
3842                                          &token);
3843         btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
3844         btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3845         btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3846         btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3847         btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3848 }
3849
3850 /*
3851  * copy everything in the in-memory inode into the btree.
3852  */
3853 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
3854                                 struct btrfs_root *root, struct inode *inode)
3855 {
3856         struct btrfs_inode_item *inode_item;
3857         struct btrfs_path *path;
3858         struct extent_buffer *leaf;
3859         int ret;
3860
3861         path = btrfs_alloc_path();
3862         if (!path)
3863                 return -ENOMEM;
3864
3865         path->leave_spinning = 1;
3866         ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
3867                                  1);
3868         if (ret) {
3869                 if (ret > 0)
3870                         ret = -ENOENT;
3871                 goto failed;
3872         }
3873
3874         leaf = path->nodes[0];
3875         inode_item = btrfs_item_ptr(leaf, path->slots[0],
3876                                     struct btrfs_inode_item);
3877
3878         fill_inode_item(trans, leaf, inode_item, inode);
3879         btrfs_mark_buffer_dirty(leaf);
3880         btrfs_set_inode_last_trans(trans, inode);
3881         ret = 0;
3882 failed:
3883         btrfs_free_path(path);
3884         return ret;
3885 }
3886
3887 /*
3888  * copy everything in the in-memory inode into the btree.
3889  */
3890 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
3891                                 struct btrfs_root *root, struct inode *inode)
3892 {
3893         int ret;
3894
3895         /*
3896          * If the inode is a free space inode, we can deadlock during commit
3897          * if we put it into the delayed code.
3898          *
3899          * The data relocation inode should also be directly updated
3900          * without delay
3901          */
3902         if (!btrfs_is_free_space_inode(inode)
3903             && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
3904             && !root->fs_info->log_root_recovering) {
3905                 btrfs_update_root_times(trans, root);
3906
3907                 ret = btrfs_delayed_update_inode(trans, root, inode);
3908                 if (!ret)
3909                         btrfs_set_inode_last_trans(trans, inode);
3910                 return ret;
3911         }
3912
3913         return btrfs_update_inode_item(trans, root, inode);
3914 }
3915
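/*
 * Like btrfs_update_inode(), but if the delayed-inode path fails with
 * -ENOSPC, fall back to updating the inode item in the tree directly.
 */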
3916 noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
3917                                          struct btrfs_root *root,
3918                                          struct inode *inode)
3919 {
3920         int ret;
3921
3922         ret = btrfs_update_inode(trans, root, inode);
3923         if (ret == -ENOSPC)
3924                 return btrfs_update_inode_item(trans, root, inode);
3925         return ret;
3926 }
3927
3928 /*
3929  * unlink helper that gets used here in inode.c and in the tree logging
3930  * recovery code.  It removes a link in a directory with a given name, and
3931  * also drops the back refs in the inode to the directory
3932  */
3933 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3934                                 struct btrfs_root *root,
3935                                 struct inode *dir, struct inode *inode,
3936                                 const char *name, int name_len)
3937 {
3938         struct btrfs_path *path;
3939         int ret = 0;
3940         struct extent_buffer *leaf;
3941         struct btrfs_dir_item *di;
3942         struct btrfs_key key;
3943         u64 index;
3944         u64 ino = btrfs_ino(inode);
3945         u64 dir_ino = btrfs_ino(dir);
3946
3947         path = btrfs_alloc_path();
3948         if (!path) {
3949                 ret = -ENOMEM;
3950                 goto out;
3951         }
3952
3953         path->leave_spinning = 1;
3954         di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3955                                     name, name_len, -1);
3956         if (IS_ERR(di)) {
3957                 ret = PTR_ERR(di);
3958                 goto err;
3959         }
3960         if (!di) {
3961                 ret = -ENOENT;
3962                 goto err;
3963         }
3964         leaf = path->nodes[0];
3965         btrfs_dir_item_key_to_cpu(leaf, di, &key);
3966         ret = btrfs_delete_one_dir_name(trans, root, path, di);
3967         if (ret)
3968                 goto err;
3969         btrfs_release_path(path);
3970
3971         /*
3972          * If we don't have the dir index cached, we have to get it by
3973          * looking up the inode ref; and since we have looked the inode
3974          * ref up anyway, remove it directly rather than via a delayed
3975          * deletion.
3976          *
3977          * But if we do have the dir index, there is no need to search
3978          * the inode ref to get it.  Since the inode ref is close to the
3979          * inode item, it is better to delay its deletion and do it when
3980          * we update the inode item.
3980          */
3981         if (BTRFS_I(inode)->dir_index) {
3982                 ret = btrfs_delayed_delete_inode_ref(inode);
3983                 if (!ret) {
3984                         index = BTRFS_I(inode)->dir_index;
3985                         goto skip_backref;
3986                 }
3987         }
3988
3989         ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
3990                                   dir_ino, &index);
3991         if (ret) {
3992                 btrfs_info(root->fs_info,
3993                         "failed to delete reference to %.*s, inode %llu parent %llu",
3994                         name_len, name, ino, dir_ino);
3995                 btrfs_abort_transaction(trans, root, ret);
3996                 goto err;
3997         }
3998 skip_backref:
3999         ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
4000         if (ret) {
4001                 btrfs_abort_transaction(trans, root, ret);
4002                 goto err;
4003         }
4004
4005         ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
4006                                          inode, dir_ino);
4007         if (ret != 0 && ret != -ENOENT) {
4008                 btrfs_abort_transaction(trans, root, ret);
4009                 goto err;
4010         }
4011
4012         ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
4013                                            dir, index);
4014         if (ret == -ENOENT)
4015                 ret = 0;
4016         else if (ret)
4017                 btrfs_abort_transaction(trans, root, ret);
4018 err:
4019         btrfs_free_path(path);
4020         if (ret)
4021                 goto out;
4022
4023         btrfs_i_size_write(dir, dir->i_size - name_len * 2);
4024         inode_inc_iversion(inode);
4025         inode_inc_iversion(dir);
4026         inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
4027         ret = btrfs_update_inode(trans, root, dir);
4028 out:
4029         return ret;
4030 }
4031
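     /*
      * Wrapper around __btrfs_unlink_inode() that also drops the victim
      * inode's link count and writes the updated inode back to the tree.
      */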
4032 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4033                        struct btrfs_root *root,
4034                        struct inode *dir, struct inode *inode,
4035                        const char *name, int name_len)
4036 {
4037         int ret;
4038         ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
4039         if (!ret) {
4040                 drop_nlink(inode);
4041                 ret = btrfs_update_inode(trans, root, inode);
4042         }
4043         return ret;
4044 }
4045
4046 /*
4047  * helper to start transaction for unlink and rmdir.
4048  *
4049  * unlink and rmdir are special in btrfs: they do not always free space, so
4050  * if we cannot make our reservations the normal way, try to see if there is
4051  * enough slack room in the global reserve to migrate from; otherwise we
4052  * cannot allow the unlink to occur.
4053  */
4054 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
4055 {
4056         struct btrfs_root *root = BTRFS_I(dir)->root;
4057
4058         /*
4059          * 1 for the possible orphan item
4060          * 1 for the dir item
4061          * 1 for the dir index
4062          * 1 for the inode ref
4063          * 1 for the inode
4064          */
4065         return btrfs_start_transaction_fallback_global_rsv(root, 5, 5);
4066 }
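
     /*
      * Both btrfs_unlink() and btrfs_rmdir() below consume this the same
      * way:
      *
      *	trans = __unlink_start_trans(dir);
      *	if (IS_ERR(trans))
      *		return PTR_ERR(trans);
      *
      * so a failed reservation, even after falling back to the global
      * reserve, turns into an error return before any item is touched.
      */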
4067
4068 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
4069 {
4070         struct btrfs_root *root = BTRFS_I(dir)->root;
4071         struct btrfs_trans_handle *trans;
4072         struct inode *inode = d_inode(dentry);
4073         int ret;
4074
4075         trans = __unlink_start_trans(dir);
4076         if (IS_ERR(trans))
4077                 return PTR_ERR(trans);
4078
4079         btrfs_record_unlink_dir(trans, dir, d_inode(dentry), 0);
4080
4081         ret = btrfs_unlink_inode(trans, root, dir, d_inode(dentry),
4082                                  dentry->d_name.name, dentry->d_name.len);
4083         if (ret)
4084                 goto out;
4085
4086         if (inode->i_nlink == 0) {
4087                 ret = btrfs_orphan_add(trans, inode);
4088                 if (ret)
4089                         goto out;
4090         }
4091
4092 out:
4093         btrfs_end_transaction(trans, root);
4094         btrfs_btree_balance_dirty(root);
4095         return ret;
4096 }
4097
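     /*
      * Remove the directory entry for a subvolume.  A subvolume's dir
      * item points at a root item rather than an inode, so instead of
      * deleting an inode ref we delete the root ref in the tree root; if
      * the root ref is already gone (-ENOENT) we fall back to searching
      * the dir index item directly to find the index to delete.
      */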
4098 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
4099                         struct btrfs_root *root,
4100                         struct inode *dir, u64 objectid,
4101                         const char *name, int name_len)
4102 {
4103         struct btrfs_path *path;
4104         struct extent_buffer *leaf;
4105         struct btrfs_dir_item *di;
4106         struct btrfs_key key;
4107         u64 index;
4108         int ret;
4109         u64 dir_ino = btrfs_ino(dir);
4110
4111         path = btrfs_alloc_path();
4112         if (!path)
4113                 return -ENOMEM;
4114
4115         di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4116                                    name, name_len, -1);
4117         if (IS_ERR_OR_NULL(di)) {
4118                 if (!di)
4119                         ret = -ENOENT;
4120                 else
4121                         ret = PTR_ERR(di);
4122                 goto out;
4123         }
4124
4125         leaf = path->nodes[0];
4126         btrfs_dir_item_key_to_cpu(leaf, di, &key);
4127         WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
4128         ret = btrfs_delete_one_dir_name(trans, root, path, di);
4129         if (ret) {
4130                 btrfs_abort_transaction(trans, root, ret);
4131                 goto out;
4132         }
4133         btrfs_release_path(path);
4134
4135         ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
4136                                  objectid, root->root_key.objectid,
4137                                  dir_ino, &index, name, name_len);
4138         if (ret < 0) {
4139                 if (ret != -ENOENT) {
4140                         btrfs_abort_transaction(trans, root, ret);
4141                         goto out;
4142                 }
4143                 di = btrfs_search_dir_index_item(root, path, dir_ino,
4144                                                  name, name_len);
4145                 if (IS_ERR_OR_NULL(di)) {
4146                         if (!di)
4147                                 ret = -ENOENT;
4148                         else
4149                                 ret = PTR_ERR(di);
4150                         btrfs_abort_transaction(trans, root, ret);
4151                         goto out;
4152                 }
4153
4154                 leaf = path->nodes[0];
4155                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4156                 btrfs_release_path(path);
4157                 index = key.offset;
4158         }
4159         btrfs_release_path(path);
4160
4161         ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
4162         if (ret) {
4163                 btrfs_abort_transaction(trans, root, ret);
4164                 goto out;
4165         }
4166
4167         btrfs_i_size_write(dir, dir->i_size - name_len * 2);
4168         inode_inc_iversion(dir);
4169         dir->i_mtime = dir->i_ctime = CURRENT_TIME;
4170         ret = btrfs_update_inode_fallback(trans, root, dir);
4171         if (ret)
4172                 btrfs_abort_transaction(trans, root, ret);
4173 out:
4174         btrfs_free_path(path);
4175         return ret;
4176 }
4177
4178 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
4179 {
4180         struct inode *inode = d_inode(dentry);
4181         int err = 0;
4182         struct btrfs_root *root = BTRFS_I(dir)->root;
4183         struct btrfs_trans_handle *trans;
4184
4185         if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
4186                 return -ENOTEMPTY;
4187         if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
4188                 return -EPERM;
4189
4190         trans = __unlink_start_trans(dir);
4191         if (IS_ERR(trans))
4192                 return PTR_ERR(trans);
4193
4194         if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
4195                 err = btrfs_unlink_subvol(trans, root, dir,
4196                                           BTRFS_I(inode)->location.objectid,
4197                                           dentry->d_name.name,
4198                                           dentry->d_name.len);
4199                 goto out;
4200         }
4201
4202         err = btrfs_orphan_add(trans, inode);
4203         if (err)
4204                 goto out;
4205
4206         /* now the directory is empty */
4207         err = btrfs_unlink_inode(trans, root, dir, d_inode(dentry),
4208                                  dentry->d_name.name, dentry->d_name.len);
4209         if (!err)
4210                 btrfs_i_size_write(inode, 0);
4211 out:
4212         btrfs_end_transaction(trans, root);
4213         btrfs_btree_balance_dirty(root);
4214
4215         return err;
4216 }
4217
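     /*
      * Check whether the transaction can still pay for the metadata freed
      * so far during a throttled truncate: convert bytes_deleted into a
      * worst-case number of csum leaves and attempt a no-flush
      * reservation, accounting it to the transaction on success.  The
      * truncate loop below treats a non-zero return as a signal to end
      * the transaction:
      *
      *	if (truncate_space_check(trans, root, extent_num_bytes))
      *		should_end = 1;
      */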
4218 static int truncate_space_check(struct btrfs_trans_handle *trans,
4219                                 struct btrfs_root *root,
4220                                 u64 bytes_deleted)
4221 {
4222         int ret;
4223
4224         bytes_deleted = btrfs_csum_bytes_to_leaves(root, bytes_deleted);
4225         ret = btrfs_block_rsv_add(root, &root->fs_info->trans_block_rsv,
4226                                   bytes_deleted, BTRFS_RESERVE_NO_FLUSH);
4227         if (!ret)
4228                 trans->bytes_reserved += bytes_deleted;
4229         return ret;
4230
4231 }
4232
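     /*
      * Shrink an inline extent so the file's data ends at new_size.
      * Uncompressed inline data is truncated in place by shrinking the
      * item; compressed inline extents keep their size and the tail of
      * the page is zeroed out instead (see the comment below).
      */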
4233 static int truncate_inline_extent(struct inode *inode,
4234                                   struct btrfs_path *path,
4235                                   struct btrfs_key *found_key,
4236                                   const u64 item_end,
4237                                   const u64 new_size)
4238 {
4239         struct extent_buffer *leaf = path->nodes[0];
4240         int slot = path->slots[0];
4241         struct btrfs_file_extent_item *fi;
4242         u32 size = (u32)(new_size - found_key->offset);
4243         struct btrfs_root *root = BTRFS_I(inode)->root;
4244
4245         fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
4246
4247         if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) {
4248                 loff_t offset = new_size;
4249                 loff_t page_end = ALIGN(offset, PAGE_CACHE_SIZE);
4250
4251                 /*
4252                  * Zero out the remainder of the last page of our inline extent,
4253                  * instead of directly truncating our inline extent here - that
4254                  * would be much more complex (decompressing all the data, then
4255                  * compressing the truncated data, which might be bigger than
4256                  * the size of the inline extent, resize the extent, etc).
4257                  * We release the path because to get the page we might need to
4258                  * read the extent item from disk (data not in the page cache).
4259                  */
4260                 btrfs_release_path(path);
4261                 return btrfs_truncate_page(inode, offset, page_end - offset, 0);
4262         }
4263
4264         btrfs_set_file_extent_ram_bytes(leaf, fi, size);
4265         size = btrfs_file_extent_calc_inline_size(size);
4266         btrfs_truncate_item(root, path, size, 1);
4267
4268         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4269                 inode_sub_bytes(inode, item_end + 1 - new_size);
4270
4271         return 0;
4272 }
4273
4274 /*
4275  * this can truncate away extent items, csum items and directory items.
4276  * It starts at a high offset and removes keys until it can't find
4277  * any higher than new_size
4278  *
4279  * csum items that cross the new i_size are truncated to the new size
4280  * as well.
4281  *
4282  * min_type is the minimum key type to truncate down to.  If set to 0, this
4283  * will kill all the items on this inode, including the INODE_ITEM_KEY.
4284  */
4285 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
4286                                struct btrfs_root *root,
4287                                struct inode *inode,
4288                                u64 new_size, u32 min_type)
4289 {
4290         struct btrfs_path *path;
4291         struct extent_buffer *leaf;
4292         struct btrfs_file_extent_item *fi;
4293         struct btrfs_key key;
4294         struct btrfs_key found_key;
4295         u64 extent_start = 0;
4296         u64 extent_num_bytes = 0;
4297         u64 extent_offset = 0;
4298         u64 item_end = 0;
4299         u64 last_size = new_size;
4300         u32 found_type = (u8)-1;
4301         int found_extent;
4302         int del_item;
4303         int pending_del_nr = 0;
4304         int pending_del_slot = 0;
4305         int extent_type = -1;
4306         int ret;
4307         int err = 0;
4308         u64 ino = btrfs_ino(inode);
4309         u64 bytes_deleted = 0;
4310         bool be_nice = 0;
4311         bool should_throttle = 0;
4312         bool should_end = 0;
4313
4314         BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
4315
4316         /*
4317          * for non-free space inodes and ref cows, we want to back off from
4318          * time to time
4319          */
4320         if (!btrfs_is_free_space_inode(inode) &&
4321             test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4322                 be_nice = 1;
4323
4324         path = btrfs_alloc_path();
4325         if (!path)
4326                 return -ENOMEM;
4327         path->reada = -1;
4328
4329         /*
4330          * We want to drop from the next block forward in case this new size is
4331          * not block aligned since we will be keeping the last block of the
4332          * extent just the way it is.
4333          */
4334         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4335             root == root->fs_info->tree_root)
4336                 btrfs_drop_extent_cache(inode, ALIGN(new_size,
4337                                         root->sectorsize), (u64)-1, 0);
4338
4339         /*
4340          * This function is also used to drop the items in the log tree before
4341          * we relog the inode, so if root != BTRFS_I(inode)->root, it means
4342  * it is used to drop the logged items. So we shouldn't kill the delayed
4343          * items.
4344          */
4345         if (min_type == 0 && root == BTRFS_I(inode)->root)
4346                 btrfs_kill_delayed_inode_items(inode);
4347
4348         key.objectid = ino;
4349         key.offset = (u64)-1;
4350         key.type = (u8)-1;
4351
4352 search_again:
4353         /*
4354          * with a 16K leaf size and 128MB extents, you can actually queue
4355          * up a huge file in a single leaf.  Most of the time that
4356          * bytes_deleted is > 0, it will be huge by the time we get here
4357          */
4358         if (be_nice && bytes_deleted > 32 * 1024 * 1024) {
4359                 if (btrfs_should_end_transaction(trans, root)) {
4360                         err = -EAGAIN;
4361                         goto error;
4362                 }
4363         }
4364
4365
4366         path->leave_spinning = 1;
4367         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
4368         if (ret < 0) {
4369                 err = ret;
4370                 goto out;
4371         }
4372
4373         if (ret > 0) {
4374                 /* there are no items in the tree for us to truncate, we're
4375                  * done
4376                  */
4377                 if (path->slots[0] == 0)
4378                         goto out;
4379                 path->slots[0]--;
4380         }
4381
4382         while (1) {
4383                 fi = NULL;
4384                 leaf = path->nodes[0];
4385                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4386                 found_type = found_key.type;
4387
4388                 if (found_key.objectid != ino)
4389                         break;
4390
4391                 if (found_type < min_type)
4392                         break;
4393
4394                 item_end = found_key.offset;
4395                 if (found_type == BTRFS_EXTENT_DATA_KEY) {
4396                         fi = btrfs_item_ptr(leaf, path->slots[0],
4397                                             struct btrfs_file_extent_item);
4398                         extent_type = btrfs_file_extent_type(leaf, fi);
4399                         if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4400                                 item_end +=
4401                                     btrfs_file_extent_num_bytes(leaf, fi);
4402                         } else {
4403                                 item_end += btrfs_file_extent_inline_len(leaf,
4404                                                          path->slots[0], fi);
4405                         }
4406                         item_end--;
4407                 }
4408                 if (found_type > min_type) {
4409                         del_item = 1;
4410                 } else {
4411                         if (item_end < new_size)
4412                                 break;
4413                         if (found_key.offset >= new_size)
4414                                 del_item = 1;
4415                         else
4416                                 del_item = 0;
4417                 }
4418                 found_extent = 0;
4419                 /* FIXME, shrink the extent if the ref count is only 1 */
4420                 if (found_type != BTRFS_EXTENT_DATA_KEY)
4421                         goto delete;
4422
4423                 if (del_item)
4424                         last_size = found_key.offset;
4425                 else
4426                         last_size = new_size;
4427
4428                 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4429                         u64 num_dec;
4430                         extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
4431                         if (!del_item) {
4432                                 u64 orig_num_bytes =
4433                                         btrfs_file_extent_num_bytes(leaf, fi);
4434                                 extent_num_bytes = ALIGN(new_size -
4435                                                 found_key.offset,
4436                                                 root->sectorsize);
4437                                 btrfs_set_file_extent_num_bytes(leaf, fi,
4438                                                          extent_num_bytes);
4439                                 num_dec = (orig_num_bytes -
4440                                            extent_num_bytes);
4441                                 if (test_bit(BTRFS_ROOT_REF_COWS,
4442                                              &root->state) &&
4443                                     extent_start != 0)
4444                                         inode_sub_bytes(inode, num_dec);
4445                                 btrfs_mark_buffer_dirty(leaf);
4446                         } else {
4447                                 extent_num_bytes =
4448                                         btrfs_file_extent_disk_num_bytes(leaf,
4449                                                                          fi);
4450                                 extent_offset = found_key.offset -
4451                                         btrfs_file_extent_offset(leaf, fi);
4452
4453                                 /* FIXME blocksize != 4096 */
4454                                 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
4455                                 if (extent_start != 0) {
4456                                         found_extent = 1;
4457                                         if (test_bit(BTRFS_ROOT_REF_COWS,
4458                                                      &root->state))
4459                                                 inode_sub_bytes(inode, num_dec);
4460                                 }
4461                         }
4462                 } else {
4463                         /*
4464                          * we can't truncate inline items that have had
4465                          * special encodings
4466                          */
4467                         if (!del_item &&
4468                             btrfs_file_extent_encryption(leaf, fi) == 0 &&
4469                             btrfs_file_extent_other_encoding(leaf, fi) == 0) {
4470
4471                                 /*
4472                                  * Need to release path in order to truncate a
4473                                  * compressed extent. So delete any accumulated
4474                                  * extent items so far.
4475                                  */
4476                                 if (btrfs_file_extent_compression(leaf, fi) !=
4477                                     BTRFS_COMPRESS_NONE && pending_del_nr) {
4478                                         err = btrfs_del_items(trans, root, path,
4479                                                               pending_del_slot,
4480                                                               pending_del_nr);
4481                                         if (err) {
4482                                                 btrfs_abort_transaction(trans,
4483                                                                         root,
4484                                                                         err);
4485                                                 goto error;
4486                                         }
4487                                         pending_del_nr = 0;
4488                                 }
4489
4490                                 err = truncate_inline_extent(inode, path,
4491                                                              &found_key,
4492                                                              item_end,
4493                                                              new_size);
4494                                 if (err) {
4495                                         btrfs_abort_transaction(trans,
4496                                                                 root, err);
4497                                         goto error;
4498                                 }
4499                         } else if (test_bit(BTRFS_ROOT_REF_COWS,
4500                                             &root->state)) {
4501                                 inode_sub_bytes(inode, item_end + 1 - new_size);
4502                         }
4503                 }
4504 delete:
4505                 if (del_item) {
4506                         if (!pending_del_nr) {
4507                                 /* no pending yet, add ourselves */
4508                                 pending_del_slot = path->slots[0];
4509                                 pending_del_nr = 1;
4510                         } else if (pending_del_nr &&
4511                                    path->slots[0] + 1 == pending_del_slot) {
4512                                 /* hop on the pending chunk */
4513                                 pending_del_nr++;
4514                                 pending_del_slot = path->slots[0];
4515                         } else {
4516                                 BUG();
4517                         }
4518                 } else {
4519                         break;
4520                 }
4521                 should_throttle = 0;
4522
4523                 if (found_extent &&
4524                     (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4525                      root == root->fs_info->tree_root)) {
4526                         btrfs_set_path_blocking(path);
4527                         bytes_deleted += extent_num_bytes;
4528                         ret = btrfs_free_extent(trans, root, extent_start,
4529                                                 extent_num_bytes, 0,
4530                                                 btrfs_header_owner(leaf),
4531                                                 ino, extent_offset);
4532                         BUG_ON(ret);
4533                         if (btrfs_should_throttle_delayed_refs(trans, root))
4534                                 btrfs_async_run_delayed_refs(root,
4535                                         trans->delayed_ref_updates * 2, 0);
4536                         if (be_nice) {
4537                                 if (truncate_space_check(trans, root,
4538                                                          extent_num_bytes)) {
4539                                         should_end = 1;
4540                                 }
4541                                 if (btrfs_should_throttle_delayed_refs(trans,
4542                                                                        root)) {
4543                                         should_throttle = 1;
4544                                 }
4545                         }
4546                 }
4547
4548                 if (found_type == BTRFS_INODE_ITEM_KEY)
4549                         break;
4550
4551                 if (path->slots[0] == 0 ||
4552                     path->slots[0] != pending_del_slot ||
4553                     should_throttle || should_end) {
4554                         if (pending_del_nr) {
4555                                 ret = btrfs_del_items(trans, root, path,
4556                                                 pending_del_slot,
4557                                                 pending_del_nr);
4558                                 if (ret) {
4559                                         btrfs_abort_transaction(trans,
4560                                                                 root, ret);
4561                                         goto error;
4562                                 }
4563                                 pending_del_nr = 0;
4564                         }
4565                         btrfs_release_path(path);
4566                         if (should_throttle) {
4567                                 unsigned long updates = trans->delayed_ref_updates;
4568                                 if (updates) {
4569                                         trans->delayed_ref_updates = 0;
4570                                         ret = btrfs_run_delayed_refs(trans, root, updates * 2);
4571                                         if (ret && !err)
4572                                                 err = ret;
4573                                 }
4574                         }
4575                         /*
4576                          * if we failed to refill our space rsv, bail out
4577                          * and let the transaction restart
4578                          */
4579                         if (should_end) {
4580                                 err = -EAGAIN;
4581                                 goto error;
4582                         }
4583                         goto search_again;
4584                 } else {
4585                         path->slots[0]--;
4586                 }
4587         }
4588 out:
4589         if (pending_del_nr) {
4590                 ret = btrfs_del_items(trans, root, path, pending_del_slot,
4591                                       pending_del_nr);
4592                 if (ret)
4593                         btrfs_abort_transaction(trans, root, ret);
4594         }
4595 error:
4596         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
4597                 btrfs_ordered_update_i_size(inode, last_size, NULL);
4598
4599         btrfs_free_path(path);
4600
4601         if (be_nice && bytes_deleted > 32 * 1024 * 1024) {
4602                 unsigned long updates = trans->delayed_ref_updates;
4603                 if (updates) {
4604                         trans->delayed_ref_updates = 0;
4605                         ret = btrfs_run_delayed_refs(trans, root, updates * 2);
4606                         if (ret && !err)
4607                                 err = ret;
4608                 }
4609         }
4610         return err;
4611 }
4612
4613 /*
4614  * btrfs_truncate_page - read, zero a chunk and write a page
4615  * @inode - inode that we're zeroing
4616  * @from - the offset to start zeroing
4617  * @len - the length to zero, 0 to zero the entire range relative to the
4618  *      offset
4619  * @front - zero up to the offset instead of from the offset on
4620  *
4621  * This will find the page for the "from" offset, cow the page and zero the
4622  * part we want to zero.  This is used with truncate and hole punching.
4623  */
4624 int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
4625                         int front)
4626 {
4627         struct address_space *mapping = inode->i_mapping;
4628         struct btrfs_root *root = BTRFS_I(inode)->root;
4629         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4630         struct btrfs_ordered_extent *ordered;
4631         struct extent_state *cached_state = NULL;
4632         char *kaddr;
4633         u32 blocksize = root->sectorsize;
4634         pgoff_t index = from >> PAGE_CACHE_SHIFT;
4635         unsigned offset = from & (PAGE_CACHE_SIZE-1);
4636         struct page *page;
4637         gfp_t mask = btrfs_alloc_write_mask(mapping);
4638         int ret = 0;
4639         u64 page_start;
4640         u64 page_end;
4641
4642         if ((offset & (blocksize - 1)) == 0 &&
4643             (!len || ((len & (blocksize - 1)) == 0)))
4644                 goto out;
4645         ret = btrfs_delalloc_reserve_space(inode,
4646                         round_down(from, PAGE_CACHE_SIZE), PAGE_CACHE_SIZE);
4647         if (ret)
4648                 goto out;
4649
4650 again:
4651         page = find_or_create_page(mapping, index, mask);
4652         if (!page) {
4653                 btrfs_delalloc_release_space(inode,
4654                                 round_down(from, PAGE_CACHE_SIZE),
4655                                 PAGE_CACHE_SIZE);
4656                 ret = -ENOMEM;
4657                 goto out;
4658         }
4659
4660         page_start = page_offset(page);
4661         page_end = page_start + PAGE_CACHE_SIZE - 1;
4662
4663         if (!PageUptodate(page)) {
4664                 ret = btrfs_readpage(NULL, page);
4665                 lock_page(page);
4666                 if (page->mapping != mapping) {
4667                         unlock_page(page);
4668                         page_cache_release(page);
4669                         goto again;
4670                 }
4671                 if (!PageUptodate(page)) {
4672                         ret = -EIO;
4673                         goto out_unlock;
4674                 }
4675         }
4676         wait_on_page_writeback(page);
4677
4678         lock_extent_bits(io_tree, page_start, page_end, &cached_state);
4679         set_page_extent_mapped(page);
4680
4681         ordered = btrfs_lookup_ordered_extent(inode, page_start);
4682         if (ordered) {
4683                 unlock_extent_cached(io_tree, page_start, page_end,
4684                                      &cached_state, GFP_NOFS);
4685                 unlock_page(page);
4686                 page_cache_release(page);
4687                 btrfs_start_ordered_extent(inode, ordered, 1);
4688                 btrfs_put_ordered_extent(ordered);
4689                 goto again;
4690         }
4691
4692         clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
4693                           EXTENT_DIRTY | EXTENT_DELALLOC |
4694                           EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
4695                           0, 0, &cached_state, GFP_NOFS);
4696
4697         ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
4698                                         &cached_state);
4699         if (ret) {
4700                 unlock_extent_cached(io_tree, page_start, page_end,
4701                                      &cached_state, GFP_NOFS);
4702                 goto out_unlock;
4703         }
4704
4705         if (offset != PAGE_CACHE_SIZE) {
4706                 if (!len)
4707                         len = PAGE_CACHE_SIZE - offset;
4708                 kaddr = kmap(page);
4709                 if (front)
4710                         memset(kaddr, 0, offset);
4711                 else
4712                         memset(kaddr + offset, 0, len);
4713                 flush_dcache_page(page);
4714                 kunmap(page);
4715         }
4716         ClearPageChecked(page);
4717         set_page_dirty(page);
4718         unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
4719                              GFP_NOFS);
4720
4721 out_unlock:
4722         if (ret)
4723                 btrfs_delalloc_release_space(inode, page_start,
4724                                              PAGE_CACHE_SIZE);
4725         unlock_page(page);
4726         page_cache_release(page);
4727 out:
4728         return ret;
4729 }
4730
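     /*
      * Insert an explicit hole extent covering [offset, offset + len).
      * With the NO_HOLES feature there is no item to insert, but the
      * in-memory transaction markers are still bumped so the hole is
      * logged on fsync.
      */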
4731 static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
4732                              u64 offset, u64 len)
4733 {
4734         struct btrfs_trans_handle *trans;
4735         int ret;
4736
4737         /*
4738          * Still need to make sure the inode looks like it's been updated so
4739          * that any holes get logged if we fsync.
4740          */
4741         if (btrfs_fs_incompat(root->fs_info, NO_HOLES)) {
4742                 BTRFS_I(inode)->last_trans = root->fs_info->generation;
4743                 BTRFS_I(inode)->last_sub_trans = root->log_transid;
4744                 BTRFS_I(inode)->last_log_commit = root->last_log_commit;
4745                 return 0;
4746         }
4747
4748         /*
4749          * 1 - for the one we're dropping
4750          * 1 - for the one we're adding
4751          * 1 - for updating the inode.
4752          */
4753         trans = btrfs_start_transaction(root, 3);
4754         if (IS_ERR(trans))
4755                 return PTR_ERR(trans);
4756
4757         ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1);
4758         if (ret) {
4759                 btrfs_abort_transaction(trans, root, ret);
4760                 btrfs_end_transaction(trans, root);
4761                 return ret;
4762         }
4763
4764         ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
4765                                        0, 0, len, 0, len, 0, 0, 0);
4766         if (ret)
4767                 btrfs_abort_transaction(trans, root, ret);
4768         else
4769                 btrfs_update_inode(trans, root, inode);
4770         btrfs_end_transaction(trans, root);
4771         return ret;
4772 }
4773
4774 /*
4775  * This function puts in dummy file extents for the area we're creating a hole
4776  * for.  So if we are truncating this file to a larger size we need to insert
4777  * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE for
4778  * the range between oldsize and size
4779  */
4780 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
4781 {
4782         struct btrfs_root *root = BTRFS_I(inode)->root;
4783         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4784         struct extent_map *em = NULL;
4785         struct extent_state *cached_state = NULL;
4786         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4787         u64 hole_start = ALIGN(oldsize, root->sectorsize);
4788         u64 block_end = ALIGN(size, root->sectorsize);
4789         u64 last_byte;
4790         u64 cur_offset;
4791         u64 hole_size;
4792         int err = 0;
4793
4794         /*
4795          * If our size started in the middle of a page we need to zero out the
4796          * rest of the page before we expand the i_size, otherwise we could
4797          * expose stale data.
4798          */
4799         err = btrfs_truncate_page(inode, oldsize, 0, 0);
4800         if (err)
4801                 return err;
4802
4803         if (size <= hole_start)
4804                 return 0;
4805
4806         while (1) {
4807                 struct btrfs_ordered_extent *ordered;
4808
4809                 lock_extent_bits(io_tree, hole_start, block_end - 1,
4810                                  &cached_state);
4811                 ordered = btrfs_lookup_ordered_range(inode, hole_start,
4812                                                      block_end - hole_start);
4813                 if (!ordered)
4814                         break;
4815                 unlock_extent_cached(io_tree, hole_start, block_end - 1,
4816                                      &cached_state, GFP_NOFS);
4817                 btrfs_start_ordered_extent(inode, ordered, 1);
4818                 btrfs_put_ordered_extent(ordered);
4819         }
4820
4821         cur_offset = hole_start;
4822         while (1) {
4823                 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
4824                                 block_end - cur_offset, 0);
4825                 if (IS_ERR(em)) {
4826                         err = PTR_ERR(em);
4827                         em = NULL;
4828                         break;
4829                 }
4830                 last_byte = min(extent_map_end(em), block_end);
4831                 last_byte = ALIGN(last_byte, root->sectorsize);
4832                 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
4833                         struct extent_map *hole_em;
4834                         hole_size = last_byte - cur_offset;
4835
4836                         err = maybe_insert_hole(root, inode, cur_offset,
4837                                                 hole_size);
4838                         if (err)
4839                                 break;
4840                         btrfs_drop_extent_cache(inode, cur_offset,
4841                                                 cur_offset + hole_size - 1, 0);
4842                         hole_em = alloc_extent_map();
4843                         if (!hole_em) {
4844                                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4845                                         &BTRFS_I(inode)->runtime_flags);
4846                                 goto next;
4847                         }
4848                         hole_em->start = cur_offset;
4849                         hole_em->len = hole_size;
4850                         hole_em->orig_start = cur_offset;
4851
4852                         hole_em->block_start = EXTENT_MAP_HOLE;
4853                         hole_em->block_len = 0;
4854                         hole_em->orig_block_len = 0;
4855                         hole_em->ram_bytes = hole_size;
4856                         hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
4857                         hole_em->compress_type = BTRFS_COMPRESS_NONE;
4858                         hole_em->generation = root->fs_info->generation;
4859
4860                         while (1) {
4861                                 write_lock(&em_tree->lock);
4862                                 err = add_extent_mapping(em_tree, hole_em, 1);
4863                                 write_unlock(&em_tree->lock);
4864                                 if (err != -EEXIST)
4865                                         break;
4866                                 btrfs_drop_extent_cache(inode, cur_offset,
4867                                                         cur_offset +
4868                                                         hole_size - 1, 0);
4869                         }
4870                         free_extent_map(hole_em);
4871                 }
4872 next:
4873                 free_extent_map(em);
4874                 em = NULL;
4875                 cur_offset = last_byte;
4876                 if (cur_offset >= block_end)
4877                         break;
4878         }
4879         free_extent_map(em);
4880         unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
4881                              GFP_NOFS);
4882         return err;
4883 }
4884
4885 static int wait_snapshoting_atomic_t(atomic_t *a)
4886 {
4887         schedule();
4888         return 0;
4889 }
4890
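     /*
      * Loop until btrfs_start_write_no_snapshoting() succeeds, sleeping
      * on will_be_snapshoted (with the trivial action callback above)
      * while a snapshot of this root is being created.
      */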
4891 static void wait_for_snapshot_creation(struct btrfs_root *root)
4892 {
4893         while (true) {
4894                 int ret;
4895
4896                 ret = btrfs_start_write_no_snapshoting(root);
4897                 if (ret)
4898                         break;
4899                 wait_on_atomic_t(&root->will_be_snapshoted,
4900                                  wait_snapshoting_atomic_t,
4901                                  TASK_UNINTERRUPTIBLE);
4902         }
4903 }
4904
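     /*
      * Apply a size change from setattr.  Growing the file fills the new
      * range with hole extents via btrfs_cont_expand(); shrinking adds an
      * orphan item first, so a crash in the middle of the truncate is
      * cleaned up on the next mount, and then does the real work in
      * btrfs_truncate().
      */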
4905 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
4906 {
4907         struct btrfs_root *root = BTRFS_I(inode)->root;
4908         struct btrfs_trans_handle *trans;
4909         loff_t oldsize = i_size_read(inode);
4910         loff_t newsize = attr->ia_size;
4911         int mask = attr->ia_valid;
4912         int ret;
4913
4914         /*
4915          * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
4916          * special case where we need to update the times despite not having
4917          * these flags set.  For all other operations the VFS set these flags
4918          * explicitly if it wants a timestamp update.
4919          */
4920         if (newsize != oldsize) {
4921                 inode_inc_iversion(inode);
4922                 if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
4923                         inode->i_ctime = inode->i_mtime =
4924                                 current_fs_time(inode->i_sb);
4925         }
4926
4927         if (newsize > oldsize) {
4928                 truncate_pagecache(inode, newsize);
4929                 /*
4930                  * Don't do an expanding truncate while snapshotting is ongoing.
4931                  * This is to ensure the snapshot captures a fully consistent
4932                  * state of this file - if the snapshot captures this expanding
4933                  * truncation, it must capture all writes that happened before
4934                  * this truncation.
4935                  */
4936                 wait_for_snapshot_creation(root);
4937                 ret = btrfs_cont_expand(inode, oldsize, newsize);
4938                 if (ret) {
4939                         btrfs_end_write_no_snapshoting(root);
4940                         return ret;
4941                 }
4942
4943                 trans = btrfs_start_transaction(root, 1);
4944                 if (IS_ERR(trans)) {
4945                         btrfs_end_write_no_snapshoting(root);
4946                         return PTR_ERR(trans);
4947                 }
4948
4949                 i_size_write(inode, newsize);
4950                 btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
4951                 ret = btrfs_update_inode(trans, root, inode);
4952                 btrfs_end_write_no_snapshoting(root);
4953                 btrfs_end_transaction(trans, root);
4954         } else {
4955
4956                 /*
4957                  * We're truncating a file that used to have good data down to
4958                  * zero. Make sure it gets into the ordered flush list so that
4959                  * any new writes get down to disk quickly.
4960                  */
4961                 if (newsize == 0)
4962                         set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
4963                                 &BTRFS_I(inode)->runtime_flags);
4964
4965                 /*
4966                  * 1 for the orphan item we're going to add
4967                  * 1 for the orphan item deletion.
4968                  */
4969                 trans = btrfs_start_transaction(root, 2);
4970                 if (IS_ERR(trans))
4971                         return PTR_ERR(trans);
4972
4973                 /*
4974                  * We need to do this in case we fail at _any_ point during the
4975                  * actual truncate.  Once we do the truncate_setsize we could
4976                  * invalidate pages, which forces any outstanding ordered io to
4977                  * be instantly completed, which will give us extents that need
4978                  * to be truncated.  If we fail to get an orphan item down we
4979                  * could have left over extents that were never meant to live,
4980                  * so we need to guarantee from this point on that everything
4981                  * will be consistent.
4982                  */
4983                 ret = btrfs_orphan_add(trans, inode);
4984                 btrfs_end_transaction(trans, root);
4985                 if (ret)
4986                         return ret;
4987
4988                 /* we don't support swapfiles, so vmtruncate shouldn't fail */
4989                 truncate_setsize(inode, newsize);
4990
4991                 /* Disable nonlocked read DIO to avoid an endless truncate */
4992                 btrfs_inode_block_unlocked_dio(inode);
4993                 inode_dio_wait(inode);
4994                 btrfs_inode_resume_unlocked_dio(inode);
4995
4996                 ret = btrfs_truncate(inode);
4997                 if (ret && inode->i_nlink) {
4998                         int err;
4999
5000                         /*
5001                          * failed to truncate, disk_i_size is only adjusted down
5002                          * as we remove extents, so it should represent the true
5003                          * size of the inode; reset the in-memory size and
5004                          * delete our orphan entry.
5005                          */
5006                         trans = btrfs_join_transaction(root);
5007                         if (IS_ERR(trans)) {
5008                                 btrfs_orphan_del(NULL, inode);
5009                                 return ret;
5010                         }
5011                         i_size_write(inode, BTRFS_I(inode)->disk_i_size);
5012                         err = btrfs_orphan_del(trans, inode);
5013                         if (err)
5014                                 btrfs_abort_transaction(trans, root, err);
5015                         btrfs_end_transaction(trans, root);
5016                 }
5017         }
5018
5019         return ret;
5020 }
5021
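     /*
      * ->setattr handler: size changes are routed through btrfs_setsize(),
      * everything else is copied into the in-memory inode and written back
      * via btrfs_dirty_inode().
      */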
5022 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
5023 {
5024         struct inode *inode = d_inode(dentry);
5025         struct btrfs_root *root = BTRFS_I(inode)->root;
5026         int err;
5027
5028         if (btrfs_root_readonly(root))
5029                 return -EROFS;
5030
5031         err = inode_change_ok(inode, attr);
5032         if (err)
5033                 return err;
5034
5035         if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
5036                 err = btrfs_setsize(inode, attr);
5037                 if (err)
5038                         return err;
5039         }
5040
5041         if (attr->ia_valid) {
5042                 setattr_copy(inode, attr);
5043                 inode_inc_iversion(inode);
5044                 err = btrfs_dirty_inode(inode);
5045
5046                 if (!err && attr->ia_valid & ATTR_MODE)
5047                         err = posix_acl_chmod(inode, inode->i_mode);
5048         }
5049
5050         return err;
5051 }
5052
5053 /*
5054  * While truncating the inode pages during eviction, we get the VFS calling
5055  * btrfs_invalidatepage() against each page of the inode. This is slow because
5056  * the calls to btrfs_invalidatepage() result in a huge amount of calls to
5057  * lock_extent_bits() and clear_extent_bit(), which keep merging and splitting
5058  * extent_state structures over and over, wasting lots of time.
5059  *
5060  * Therefore if the inode is being evicted, let btrfs_invalidatepage() skip all
5061  * those expensive operations on a per-page basis and do only the ordered io
5062  * finishing, while we release here the extent_map and extent_state structures,
5063  * without the excessive merging and splitting.
5064  */
5065 static void evict_inode_truncate_pages(struct inode *inode)
5066 {
5067         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5068         struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree;
5069         struct rb_node *node;
5070
5071         ASSERT(inode->i_state & I_FREEING);
5072         truncate_inode_pages_final(&inode->i_data);
5073
5074         write_lock(&map_tree->lock);
5075         while (!RB_EMPTY_ROOT(&map_tree->map)) {
5076                 struct extent_map *em;
5077
5078                 node = rb_first(&map_tree->map);
5079                 em = rb_entry(node, struct extent_map, rb_node);
5080                 clear_bit(EXTENT_FLAG_PINNED, &em->flags);
5081                 clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
5082                 remove_extent_mapping(map_tree, em);
5083                 free_extent_map(em);
5084                 if (need_resched()) {
5085                         write_unlock(&map_tree->lock);
5086                         cond_resched();
5087                         write_lock(&map_tree->lock);
5088                 }
5089         }
5090         write_unlock(&map_tree->lock);
5091
5092         /*
5093          * Keep looping until we have no more ranges in the io tree.
5094          * We can have ongoing bios started by readpages (called from readahead)
5095          * that have their endio callback (extent_io.c:end_bio_extent_readpage)
5096  * still in progress (unlocked the pages in the bio but did not yet
5097  * unlock the ranges in the io tree). This means some
5098          * ranges can still be locked and eviction started because before
5099          * submitting those bios, which are executed by a separate task (work
5100          * queue kthread), inode references (inode->i_count) were not taken
5101          * (which would be dropped in the end io callback of each bio).
5102          * Therefore here we effectively end up waiting for those bios and
5103          * anyone else holding locked ranges without having bumped the inode's
5104          * reference count - if we don't do it, when they access the inode's
5105          * io_tree to unlock a range it may be too late, leading to an
5106          * use-after-free issue.
5107          */
5108         spin_lock(&io_tree->lock);
5109         while (!RB_EMPTY_ROOT(&io_tree->state)) {
5110                 struct extent_state *state;
5111                 struct extent_state *cached_state = NULL;
5112                 u64 start;
5113                 u64 end;
5114
5115                 node = rb_first(&io_tree->state);
5116                 state = rb_entry(node, struct extent_state, rb_node);
5117                 start = state->start;
5118                 end = state->end;
5119                 spin_unlock(&io_tree->lock);
5120
5121                 lock_extent_bits(io_tree, start, end, &cached_state);
5122
5123                 /*
5124                  * If the range still has the DELALLOC flag, the extent never
5125                  * reached disk and its reserved space won't be freed by delayed_ref.
5126                  * So we need to free its reserved space here.
5127                  * (Refer to comment in btrfs_invalidatepage, case 2)
5128                  *
5129                  * Note, end is the bytenr of last byte, so we need + 1 here.
5130                  */
5131                 if (state->state & EXTENT_DELALLOC)
5132                         btrfs_qgroup_free_data(inode, start, end - start + 1);
5133
5134                 clear_extent_bit(io_tree, start, end,
5135                                  EXTENT_LOCKED | EXTENT_DIRTY |
5136                                  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
5137                                  EXTENT_DEFRAG, 1, 1,
5138                                  &cached_state, GFP_NOFS);
5139
5140                 cond_resched();
5141                 spin_lock(&io_tree->lock);
5142         }
5143         spin_unlock(&io_tree->lock);
5144 }
5145
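     /*
      * Final stage of inode teardown.  Drop all pages and extent state
      * first, then, if the inode was unlinked, truncate away its items
      * and delete the orphan item, reserving the metadata for the
      * truncate from a temporary block rsv and stealing from the global
      * reserve if needed.
      */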
5146 void btrfs_evict_inode(struct inode *inode)
5147 {
5148         struct btrfs_trans_handle *trans;
5149         struct btrfs_root *root = BTRFS_I(inode)->root;
5150         struct btrfs_block_rsv *rsv, *global_rsv;
5151         int steal_from_global = 0;
5152         u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
5153         int ret;
5154
5155         trace_btrfs_inode_evict(inode);
5156
5157         evict_inode_truncate_pages(inode);
5158
5159         if (inode->i_nlink &&
5160             ((btrfs_root_refs(&root->root_item) != 0 &&
5161               root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
5162              btrfs_is_free_space_inode(inode)))
5163                 goto no_delete;
5164
5165         if (is_bad_inode(inode)) {
5166                 btrfs_orphan_del(NULL, inode);
5167                 goto no_delete;
5168         }
5169         /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
5170         if (!special_file(inode->i_mode))
5171                 btrfs_wait_ordered_range(inode, 0, (u64)-1);
5172
5173         btrfs_free_io_failure_record(inode, 0, (u64)-1);
5174
5175         if (root->fs_info->log_root_recovering) {
5176                 BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
5177                                  &BTRFS_I(inode)->runtime_flags));
5178                 goto no_delete;
5179         }
5180
5181         if (inode->i_nlink > 0) {
5182                 BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
5183                        root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
5184                 goto no_delete;
5185         }
5186
5187         ret = btrfs_commit_inode_delayed_inode(inode);
5188         if (ret) {
5189                 btrfs_orphan_del(NULL, inode);
5190                 goto no_delete;
5191         }
5192
5193         rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
5194         if (!rsv) {
5195                 btrfs_orphan_del(NULL, inode);
5196                 goto no_delete;
5197         }
5198         rsv->size = min_size;
5199         rsv->failfast = 1;
5200         global_rsv = &root->fs_info->global_block_rsv;
5201
5202         btrfs_i_size_write(inode, 0);
5203
5204         /*
5205          * This is a bit simpler than btrfs_truncate since we've already
5206          * reserved our space for our orphan item in the unlink, so we just
5207          * need to reserve some slack space in case we add bytes and update
5208          * inode item when doing the truncate.
5209          */
5210         while (1) {
5211                 ret = btrfs_block_rsv_refill(root, rsv, min_size,
5212                                              BTRFS_RESERVE_FLUSH_LIMIT);
5213
5214                 /*
5215                  * Try and steal from the global reserve since we will
5216                  * likely not use this space anyway; we want to try as
5217                  * hard as possible to get this to work.
5218                  */
5219                 if (ret)
5220                         steal_from_global++;
5221                 else
5222                         steal_from_global = 0;
5223                 ret = 0;
5224
5225                 /*
5226                  * steal_from_global == 0: we reserved stuff, hooray!
5227                  * steal_from_global == 1: we didn't reserve stuff, boo!
5228                  * steal_from_global == 2: we've committed, still not a lot of
5229                  * room but maybe we'll have room in the global reserve this
5230                  * time.
5231                  * steal_from_global == 3: abandon all hope!
5232                  */
5233                 if (steal_from_global > 2) {
5234                         btrfs_warn(root->fs_info,
5235                                 "Could not get space for a delete, will truncate on mount %d",
5236                                 ret);
5237                         btrfs_orphan_del(NULL, inode);
5238                         btrfs_free_block_rsv(root, rsv);
5239                         goto no_delete;
5240                 }
5241
5242                 trans = btrfs_join_transaction(root);
5243                 if (IS_ERR(trans)) {
5244                         btrfs_orphan_del(NULL, inode);
5245                         btrfs_free_block_rsv(root, rsv);
5246                         goto no_delete;
5247                 }
5248
5249                 /*
5250                  * We can't just steal from the global reserve; we need to make
5251                  * sure there is room to do it.  If not, we need to commit and
5252                  * try again.
5253                  */
5254                 if (steal_from_global) {
5255                         if (!btrfs_check_space_for_delayed_refs(trans, root))
5256                                 ret = btrfs_block_rsv_migrate(global_rsv, rsv,
5257                                                               min_size);
5258                         else
5259                                 ret = -ENOSPC;
5260                 }
5261
5262                 /*
5263                  * Couldn't steal from the global reserve, we have too much
5264                  * pending stuff built up, commit the transaction and try it
5265                  * again.
5266                  */
5267                 if (ret) {
5268                         ret = btrfs_commit_transaction(trans, root);
5269                         if (ret) {
5270                                 btrfs_orphan_del(NULL, inode);
5271                                 btrfs_free_block_rsv(root, rsv);
5272                                 goto no_delete;
5273                         }
5274                         continue;
5275                 } else {
5276                         steal_from_global = 0;
5277                 }
5278
5279                 trans->block_rsv = rsv;
5280
5281                 ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
5282                 if (ret != -ENOSPC && ret != -EAGAIN)
5283                         break;
5284
5285                 trans->block_rsv = &root->fs_info->trans_block_rsv;
5286                 btrfs_end_transaction(trans, root);
5287                 trans = NULL;
5288                 btrfs_btree_balance_dirty(root);
5289         }
5290
5291         btrfs_free_block_rsv(root, rsv);
5292
5293         /*
5294          * Errors here aren't a big deal; they just mean we leave orphan items
5295          * in the tree.  They will be cleaned up on the next mount.
5296          */
5297         if (ret == 0) {
5298                 trans->block_rsv = root->orphan_block_rsv;
5299                 btrfs_orphan_del(trans, inode);
5300         } else {
5301                 btrfs_orphan_del(NULL, inode);
5302         }
5303
5304         trans->block_rsv = &root->fs_info->trans_block_rsv;
5305         if (!(root == root->fs_info->tree_root ||
5306               root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
5307                 btrfs_return_ino(root, btrfs_ino(inode));
5308
5309         btrfs_end_transaction(trans, root);
5310         btrfs_btree_balance_dirty(root);
5311 no_delete:
5312         btrfs_remove_delayed_node(inode);
5313         clear_inode(inode);
5314         return;
5315 }
5316
5317 /*
5318  * this returns, in the location pointer, the key found in the dir entry.
5319  * If no dir entries were found, location->objectid is 0.
5320  */
5321 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
5322                                struct btrfs_key *location)
5323 {
5324         const char *name = dentry->d_name.name;
5325         int namelen = dentry->d_name.len;
5326         struct btrfs_dir_item *di;
5327         struct btrfs_path *path;
5328         struct btrfs_root *root = BTRFS_I(dir)->root;
5329         int ret = 0;
5330
5331         path = btrfs_alloc_path();
5332         if (!path)
5333                 return -ENOMEM;
5334
5335         di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
5336                                     namelen, 0);
5337         if (IS_ERR(di))
5338                 ret = PTR_ERR(di);
5339
5340         if (IS_ERR_OR_NULL(di))
5341                 goto out_err;
5342
5343         btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
5344 out:
5345         btrfs_free_path(path);
5346         return ret;
5347 out_err:
5348         location->objectid = 0;
5349         goto out;
5350 }
5351
5352 /*
5353  * when we hit a tree root in a directory, the btrfs part of the inode
5354  * needs to be changed to reflect the root directory of the tree root.  This
5355  * is kind of like crossing a mount point.
5356  */
5357 static int fixup_tree_root_location(struct btrfs_root *root,
5358                                     struct inode *dir,
5359                                     struct dentry *dentry,
5360                                     struct btrfs_key *location,
5361                                     struct btrfs_root **sub_root)
5362 {
5363         struct btrfs_path *path;
5364         struct btrfs_root *new_root;
5365         struct btrfs_root_ref *ref;
5366         struct extent_buffer *leaf;
5367         struct btrfs_key key;
5368         int ret;
5369         int err = 0;
5370
5371         path = btrfs_alloc_path();
5372         if (!path) {
5373                 err = -ENOMEM;
5374                 goto out;
5375         }
5376
5377         err = -ENOENT;
5378         key.objectid = BTRFS_I(dir)->root->root_key.objectid;
5379         key.type = BTRFS_ROOT_REF_KEY;
5380         key.offset = location->objectid;
5381
5382         ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, path,
5383                                 0, 0);
5384         if (ret) {
5385                 if (ret < 0)
5386                         err = ret;
5387                 goto out;
5388         }
5389
5390         leaf = path->nodes[0];
5391         ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
5392         if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
5393             btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
5394                 goto out;
5395
5396         ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
5397                                    (unsigned long)(ref + 1),
5398                                    dentry->d_name.len);
5399         if (ret)
5400                 goto out;
5401
5402         btrfs_release_path(path);
5403
5404         new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
5405         if (IS_ERR(new_root)) {
5406                 err = PTR_ERR(new_root);
5407                 goto out;
5408         }
5409
5410         *sub_root = new_root;
5411         location->objectid = btrfs_root_dirid(&new_root->root_item);
5412         location->type = BTRFS_INODE_ITEM_KEY;
5413         location->offset = 0;
5414         err = 0;
5415 out:
5416         btrfs_free_path(path);
5417         return err;
5418 }
5419
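/*
 * Insert the inode into the per-root red-black tree, keyed by inode
 * number, so btrfs_invalidate_inodes() can later walk every inode of
 * the root.
 */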
5420 static void inode_tree_add(struct inode *inode)
5421 {
5422         struct btrfs_root *root = BTRFS_I(inode)->root;
5423         struct btrfs_inode *entry;
5424         struct rb_node **p;
5425         struct rb_node *parent;
5426         struct rb_node *new = &BTRFS_I(inode)->rb_node;
5427         u64 ino = btrfs_ino(inode);
5428
5429         if (inode_unhashed(inode))
5430                 return;
5431         parent = NULL;
5432         spin_lock(&root->inode_lock);
5433         p = &root->inode_tree.rb_node;
5434         while (*p) {
5435                 parent = *p;
5436                 entry = rb_entry(parent, struct btrfs_inode, rb_node);
5437
5438                 if (ino < btrfs_ino(&entry->vfs_inode))
5439                         p = &parent->rb_left;
5440                 else if (ino > btrfs_ino(&entry->vfs_inode))
5441                         p = &parent->rb_right;
5442                 else {
5443                         WARN_ON(!(entry->vfs_inode.i_state &
5444                                   (I_WILL_FREE | I_FREEING)));
5445                         rb_replace_node(parent, new, &root->inode_tree);
5446                         RB_CLEAR_NODE(parent);
5447                         spin_unlock(&root->inode_lock);
5448                         return;
5449                 }
5450         }
5451         rb_link_node(new, parent, p);
5452         rb_insert_color(new, &root->inode_tree);
5453         spin_unlock(&root->inode_lock);
5454 }
5455
5456 static void inode_tree_del(struct inode *inode)
5457 {
5458         struct btrfs_root *root = BTRFS_I(inode)->root;
5459         int empty = 0;
5460
5461         spin_lock(&root->inode_lock);
5462         if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
5463                 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
5464                 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
5465                 empty = RB_EMPTY_ROOT(&root->inode_tree);
5466         }
5467         spin_unlock(&root->inode_lock);
5468
5469         if (empty && btrfs_root_refs(&root->root_item) == 0) {
5470                 synchronize_srcu(&root->fs_info->subvol_srcu);
5471                 spin_lock(&root->inode_lock);
5472                 empty = RB_EMPTY_ROOT(&root->inode_tree);
5473                 spin_unlock(&root->inode_lock);
5474                 if (empty)
5475                         btrfs_add_dead_root(root);
5476         }
5477 }
5478
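/*
 * Evict all inodes of a root that is going away: walk the per-root
 * rb-tree in inode number order, prune dentry aliases and drop the
 * igrab() reference so each inode can be evicted.
 */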
5479 void btrfs_invalidate_inodes(struct btrfs_root *root)
5480 {
5481         struct rb_node *node;
5482         struct rb_node *prev;
5483         struct btrfs_inode *entry;
5484         struct inode *inode;
5485         u64 objectid = 0;
5486
5487         if (!test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
5488                 WARN_ON(btrfs_root_refs(&root->root_item) != 0);
5489
5490         spin_lock(&root->inode_lock);
5491 again:
5492         node = root->inode_tree.rb_node;
5493         prev = NULL;
5494         while (node) {
5495                 prev = node;
5496                 entry = rb_entry(node, struct btrfs_inode, rb_node);
5497
5498                 if (objectid < btrfs_ino(&entry->vfs_inode))
5499                         node = node->rb_left;
5500                 else if (objectid > btrfs_ino(&entry->vfs_inode))
5501                         node = node->rb_right;
5502                 else
5503                         break;
5504         }
5505         if (!node) {
5506                 while (prev) {
5507                         entry = rb_entry(prev, struct btrfs_inode, rb_node);
5508                         if (objectid <= btrfs_ino(&entry->vfs_inode)) {
5509                                 node = prev;
5510                                 break;
5511                         }
5512                         prev = rb_next(prev);
5513                 }
5514         }
5515         while (node) {
5516                 entry = rb_entry(node, struct btrfs_inode, rb_node);
5517                 objectid = btrfs_ino(&entry->vfs_inode) + 1;
5518                 inode = igrab(&entry->vfs_inode);
5519                 if (inode) {
5520                         spin_unlock(&root->inode_lock);
5521                         if (atomic_read(&inode->i_count) > 1)
5522                                 d_prune_aliases(inode);
5523                         /*
5524                          * btrfs_drop_inode will have it removed from
5525                          * the inode cache when its usage count
5526                          * hits zero.
5527                          */
5528                         iput(inode);
5529                         cond_resched();
5530                         spin_lock(&root->inode_lock);
5531                         goto again;
5532                 }
5533
5534                 if (cond_resched_lock(&root->inode_lock))
5535                         goto again;
5536
5537                 node = rb_next(node);
5538         }
5539         spin_unlock(&root->inode_lock);
5540 }
5541
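/*
 * Callbacks for iget5_locked(): initialize a newly allocated inode
 * from the lookup arguments, and test whether an existing inode
 * matches a given location/root pair.
 */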
5542 static int btrfs_init_locked_inode(struct inode *inode, void *p)
5543 {
5544         struct btrfs_iget_args *args = p;
5545         inode->i_ino = args->location->objectid;
5546         memcpy(&BTRFS_I(inode)->location, args->location,
5547                sizeof(*args->location));
5548         BTRFS_I(inode)->root = args->root;
5549         return 0;
5550 }
5551
5552 static int btrfs_find_actor(struct inode *inode, void *opaque)
5553 {
5554         struct btrfs_iget_args *args = opaque;
5555         return args->location->objectid == BTRFS_I(inode)->location.objectid &&
5556                 args->root == BTRFS_I(inode)->root;
5557 }
5558
5559 static struct inode *btrfs_iget_locked(struct super_block *s,
5560                                        struct btrfs_key *location,
5561                                        struct btrfs_root *root)
5562 {
5563         struct inode *inode;
5564         struct btrfs_iget_args args;
5565         unsigned long hashval = btrfs_inode_hash(location->objectid, root);
5566
5567         args.location = location;
5568         args.root = root;
5569
5570         inode = iget5_locked(s, hashval, btrfs_find_actor,
5571                              btrfs_init_locked_inode,
5572                              (void *)&args);
5573         return inode;
5574 }
5575
5576 /* Get an inode object given its location and corresponding root.
5577  * Returns in *new whether the inode was read from disk.
5578  */
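/*
 * A minimal usage sketch (hypothetical caller; sb and root are
 * assumed to be in scope), reading the root directory inode of a
 * subvolume:
 *
 *	struct btrfs_key key = {
 *		.objectid = BTRFS_FIRST_FREE_OBJECTID,
 *		.type = BTRFS_INODE_ITEM_KEY,
 *		.offset = 0,
 *	};
 *	struct inode *inode = btrfs_iget(sb, &key, root, NULL);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 */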
5579 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
5580                          struct btrfs_root *root, int *new)
5581 {
5582         struct inode *inode;
5583
5584         inode = btrfs_iget_locked(s, location, root);
5585         if (!inode)
5586                 return ERR_PTR(-ENOMEM);
5587
5588         if (inode->i_state & I_NEW) {
5589                 btrfs_read_locked_inode(inode);
5590                 if (!is_bad_inode(inode)) {
5591                         inode_tree_add(inode);
5592                         unlock_new_inode(inode);
5593                         if (new)
5594                                 *new = 1;
5595                 } else {
5596                         unlock_new_inode(inode);
5597                         iput(inode);
5598                         inode = ERR_PTR(-ESTALE);
5599                 }
5600         }
5601
5602         return inode;
5603 }
5604
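/*
 * Build a dummy, read-only directory inode to stand in for a
 * subvolume whose root ref cannot be resolved (the -ENOENT case in
 * btrfs_lookup_dentry() below).
 */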
5605 static struct inode *new_simple_dir(struct super_block *s,
5606                                     struct btrfs_key *key,
5607                                     struct btrfs_root *root)
5608 {
5609         struct inode *inode = new_inode(s);
5610
5611         if (!inode)
5612                 return ERR_PTR(-ENOMEM);
5613
5614         BTRFS_I(inode)->root = root;
5615         memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
5616         set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
5617
5618         inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
5619         inode->i_op = &btrfs_dir_ro_inode_operations;
5620         inode->i_fop = &simple_dir_operations;
5621         inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
5622         inode->i_mtime = CURRENT_TIME;
5623         inode->i_atime = inode->i_mtime;
5624         inode->i_ctime = inode->i_mtime;
5625         BTRFS_I(inode)->i_otime = inode->i_mtime;
5626
5627         return inode;
5628 }
5629
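/*
 * Resolve a name in a directory to an inode, crossing into a
 * subvolume when the dir item points at a root item instead of an
 * inode item.
 */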
5630 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
5631 {
5632         struct inode *inode;
5633         struct btrfs_root *root = BTRFS_I(dir)->root;
5634         struct btrfs_root *sub_root = root;
5635         struct btrfs_key location;
5636         int index;
5637         int ret = 0;
5638
5639         if (dentry->d_name.len > BTRFS_NAME_LEN)
5640                 return ERR_PTR(-ENAMETOOLONG);
5641
5642         ret = btrfs_inode_by_name(dir, dentry, &location);
5643         if (ret < 0)
5644                 return ERR_PTR(ret);
5645
5646         if (location.objectid == 0)
5647                 return ERR_PTR(-ENOENT);
5648
5649         if (location.type == BTRFS_INODE_ITEM_KEY) {
5650                 inode = btrfs_iget(dir->i_sb, &location, root, NULL);
5651                 return inode;
5652         }
5653
5654         BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
5655
5656         index = srcu_read_lock(&root->fs_info->subvol_srcu);
5657         ret = fixup_tree_root_location(root, dir, dentry,
5658                                        &location, &sub_root);
5659         if (ret < 0) {
5660                 if (ret != -ENOENT)
5661                         inode = ERR_PTR(ret);
5662                 else
5663                         inode = new_simple_dir(dir->i_sb, &location, sub_root);
5664         } else {
5665                 inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
5666         }
5667         srcu_read_unlock(&root->fs_info->subvol_srcu, index);
5668
5669         if (!IS_ERR(inode) && root != sub_root) {
5670                 down_read(&root->fs_info->cleanup_work_sem);
5671                 if (!(inode->i_sb->s_flags & MS_RDONLY))
5672                         ret = btrfs_orphan_cleanup(sub_root);
5673                 up_read(&root->fs_info->cleanup_work_sem);
5674                 if (ret) {
5675                         iput(inode);
5676                         inode = ERR_PTR(ret);
5677                 }
5678         }
5679
5680         return inode;
5681 }
5682
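/*
 * Returns 1 when the dentry should be dropped from the dcache: its
 * root is being deleted, or it points at a dummy subvolume directory.
 */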
5683 static int btrfs_dentry_delete(const struct dentry *dentry)
5684 {
5685         struct btrfs_root *root;
5686         struct inode *inode = d_inode(dentry);
5687
5688         if (!inode && !IS_ROOT(dentry))
5689                 inode = d_inode(dentry->d_parent);
5690
5691         if (inode) {
5692                 root = BTRFS_I(inode)->root;
5693                 if (btrfs_root_refs(&root->root_item) == 0)
5694                         return 1;
5695
5696                 if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5697                         return 1;
5698         }
5699         return 0;
5700 }
5701
5702 static void btrfs_dentry_release(struct dentry *dentry)
5703 {
5704         kfree(dentry->d_fsdata);
5705 }
5706
5707 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
5708                                    unsigned int flags)
5709 {
5710         struct inode *inode;
5711
5712         inode = btrfs_lookup_dentry(dir, dentry);
5713         if (IS_ERR(inode)) {
5714                 if (PTR_ERR(inode) == -ENOENT)
5715                         inode = NULL;
5716                 else
5717                         return ERR_CAST(inode);
5718         }
5719
5720         return d_splice_alias(inode, dentry);
5721 }
5722
5723 unsigned char btrfs_filetype_table[] = {
5724         DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
5725 };
5726
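/*
 * Emit directory entries, merging what is on disk with the delayed
 * items that have not been committed yet: pending deletions are
 * skipped and pending insertions are emitted at the end.
 */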
5727 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
5728 {
5729         struct inode *inode = file_inode(file);
5730         struct btrfs_root *root = BTRFS_I(inode)->root;
5731         struct btrfs_item *item;
5732         struct btrfs_dir_item *di;
5733         struct btrfs_key key;
5734         struct btrfs_key found_key;
5735         struct btrfs_path *path;
5736         struct list_head ins_list;
5737         struct list_head del_list;
5738         int ret;
5739         struct extent_buffer *leaf;
5740         int slot;
5741         unsigned char d_type;
5742         int over = 0;
5743         u32 di_cur;
5744         u32 di_total;
5745         u32 di_len;
5746         int key_type = BTRFS_DIR_INDEX_KEY;
5747         char tmp_name[32];
5748         char *name_ptr;
5749         int name_len;
5750         int is_curr = 0;        /* ctx->pos points to the current index? */
5751
5752         /* FIXME, use a real flag for deciding about the key type */
5753         if (root->fs_info->tree_root == root)
5754                 key_type = BTRFS_DIR_ITEM_KEY;
5755
5756         if (!dir_emit_dots(file, ctx))
5757                 return 0;
5758
5759         path = btrfs_alloc_path();
5760         if (!path)
5761                 return -ENOMEM;
5762
5763         path->reada = 1;
5764
5765         if (key_type == BTRFS_DIR_INDEX_KEY) {
5766                 INIT_LIST_HEAD(&ins_list);
5767                 INIT_LIST_HEAD(&del_list);
5768                 btrfs_get_delayed_items(inode, &ins_list, &del_list);
5769         }
5770
5771         key.type = key_type;
5772         key.offset = ctx->pos;
5773         key.objectid = btrfs_ino(inode);
5774
5775         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5776         if (ret < 0)
5777                 goto err;
5778
5779         while (1) {
5780                 leaf = path->nodes[0];
5781                 slot = path->slots[0];
5782                 if (slot >= btrfs_header_nritems(leaf)) {
5783                         ret = btrfs_next_leaf(root, path);
5784                         if (ret < 0)
5785                                 goto err;
5786                         else if (ret > 0)
5787                                 break;
5788                         continue;
5789                 }
5790
5791                 item = btrfs_item_nr(slot);
5792                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5793
5794                 if (found_key.objectid != key.objectid)
5795                         break;
5796                 if (found_key.type != key_type)
5797                         break;
5798                 if (found_key.offset < ctx->pos)
5799                         goto next;
5800                 if (key_type == BTRFS_DIR_INDEX_KEY &&
5801                     btrfs_should_delete_dir_index(&del_list,
5802                                                   found_key.offset))
5803                         goto next;
5804
5805                 ctx->pos = found_key.offset;
5806                 is_curr = 1;
5807
5808                 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
5809                 di_cur = 0;
5810                 di_total = btrfs_item_size(leaf, item);
5811
5812                 while (di_cur < di_total) {
5813                         struct btrfs_key location;
5814
5815                         if (verify_dir_item(root, leaf, di))
5816                                 break;
5817
5818                         name_len = btrfs_dir_name_len(leaf, di);
5819                         if (name_len <= sizeof(tmp_name)) {
5820                                 name_ptr = tmp_name;
5821                         } else {
5822                                 name_ptr = kmalloc(name_len, GFP_NOFS);
5823                                 if (!name_ptr) {
5824                                         ret = -ENOMEM;
5825                                         goto err;
5826                                 }
5827                         }
5828                         read_extent_buffer(leaf, name_ptr,
5829                                            (unsigned long)(di + 1), name_len);
5830
5831                         d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
5832                         btrfs_dir_item_key_to_cpu(leaf, di, &location);
5833
5834
5835                         /* is this a reference to our own snapshot? If so
5836                          * skip it.
5837                          *
5838                          * In contrast to old kernels, we insert the snapshot's
5839                          * dir item and dir index after it has been created, so
5840                          * we won't find a reference to our own snapshot. We
5841                          * still keep the following code for backward
5842                          * compatibility.
5843                          */
5844                         if (location.type == BTRFS_ROOT_ITEM_KEY &&
5845                             location.objectid == root->root_key.objectid) {
5846                                 over = 0;
5847                                 goto skip;
5848                         }
5849                         over = !dir_emit(ctx, name_ptr, name_len,
5850                                        location.objectid, d_type);
5851
5852 skip:
5853                         if (name_ptr != tmp_name)
5854                                 kfree(name_ptr);
5855
5856                         if (over)
5857                                 goto nopos;
5858                         di_len = btrfs_dir_name_len(leaf, di) +
5859                                  btrfs_dir_data_len(leaf, di) + sizeof(*di);
5860                         di_cur += di_len;
5861                         di = (struct btrfs_dir_item *)((char *)di + di_len);
5862                 }
5863 next:
5864                 path->slots[0]++;
5865         }
5866
5867         if (key_type == BTRFS_DIR_INDEX_KEY) {
5868                 if (is_curr)
5869                         ctx->pos++;
5870                 ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
5871                 if (ret)
5872                         goto nopos;
5873         }
5874
5875         /* Reached end of directory/root. Bump pos past the last item. */
5876         ctx->pos++;
5877
5878         /*
5879          * Stop new entries from being returned after we return the last
5880          * entry.
5881          *
5882          * New directory entries are assigned a strictly increasing
5883          * offset.  This means that new entries created during readdir
5884          * are *guaranteed* to be seen in the future by that readdir.
5885          * This has broken buggy programs which operate on names as
5886          * they're returned by readdir.  Until we re-use freed offsets
5887          * we have this hack to stop new entries from being returned
5888          * under the assumption that they'll never reach this huge
5889          * offset.
5890          *
5891          * This is being careful not to overflow 32bit loff_t unless the
5892          * last entry requires it because doing so has broken 32bit apps
5893          * in the past.
5894          */
5895         if (key_type == BTRFS_DIR_INDEX_KEY) {
5896                 if (ctx->pos >= INT_MAX)
5897                         ctx->pos = LLONG_MAX;
5898                 else
5899                         ctx->pos = INT_MAX;
5900         }
5901 nopos:
5902         ret = 0;
5903 err:
5904         if (key_type == BTRFS_DIR_INDEX_KEY)
5905                 btrfs_put_delayed_items(&ins_list, &del_list);
5906         btrfs_free_path(path);
5907         return ret;
5908 }
5909
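/*
 * ->write_inode is mostly a no-op for btrfs since metadata reaches
 * disk via transaction commit; only WB_SYNC_ALL forces a commit here.
 */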
5910 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
5911 {
5912         struct btrfs_root *root = BTRFS_I(inode)->root;
5913         struct btrfs_trans_handle *trans;
5914         int ret = 0;
5915         bool nolock = false;
5916
5917         if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
5918                 return 0;
5919
5920         if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(inode))
5921                 nolock = true;
5922
5923         if (wbc->sync_mode == WB_SYNC_ALL) {
5924                 if (nolock)
5925                         trans = btrfs_join_transaction_nolock(root);
5926                 else
5927                         trans = btrfs_join_transaction(root);
5928                 if (IS_ERR(trans))
5929                         return PTR_ERR(trans);
5930                 ret = btrfs_commit_transaction(trans, root);
5931         }
5932         return ret;
5933 }
5934
5935 /*
5936  * This is somewhat expensive, updating the tree every time the
5937  * inode changes.  But it is most likely to find the inode in cache.
5938  * FIXME: needs more benchmarking... there are no reasons other than
5939  * performance to keep or drop this code.
5940  */
5941 static int btrfs_dirty_inode(struct inode *inode)
5942 {
5943         struct btrfs_root *root = BTRFS_I(inode)->root;
5944         struct btrfs_trans_handle *trans;
5945         int ret;
5946
5947         if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
5948                 return 0;
5949
5950         trans = btrfs_join_transaction(root);
5951         if (IS_ERR(trans))
5952                 return PTR_ERR(trans);
5953
5954         ret = btrfs_update_inode(trans, root, inode);
5955         if (ret && ret == -ENOSPC) {
5956                 /* whoops, lets try again with the full transaction */
5957                 btrfs_end_transaction(trans, root);
5958                 trans = btrfs_start_transaction(root, 1);
5959                 if (IS_ERR(trans))
5960                         return PTR_ERR(trans);
5961
5962                 ret = btrfs_update_inode(trans, root, inode);
5963         }
5964         btrfs_end_transaction(trans, root);
5965         if (BTRFS_I(inode)->delayed_node)
5966                 btrfs_balance_delayed_items(root);
5967
5968         return ret;
5969 }
5970
5971 /*
5972  * This is a copy of file_update_time.  We need this so we can return error on
5973  * ENOSPC for updating the inode in the case of file write and mmap writes.
5974  */
5975 static int btrfs_update_time(struct inode *inode, struct timespec *now,
5976                              int flags)
5977 {
5978         struct btrfs_root *root = BTRFS_I(inode)->root;
5979
5980         if (btrfs_root_readonly(root))
5981                 return -EROFS;
5982
5983         if (flags & S_VERSION)
5984                 inode_inc_iversion(inode);
5985         if (flags & S_CTIME)
5986                 inode->i_ctime = *now;
5987         if (flags & S_MTIME)
5988                 inode->i_mtime = *now;
5989         if (flags & S_ATIME)
5990                 inode->i_atime = *now;
5991         return btrfs_dirty_inode(inode);
5992 }
5993
5994 /*
5995  * find the highest existing sequence number in a directory
5996  * and then set the in-memory index_cnt variable to the next
5997  * free sequence number
5998  */
5999 static int btrfs_set_inode_index_count(struct inode *inode)
6000 {
6001         struct btrfs_root *root = BTRFS_I(inode)->root;
6002         struct btrfs_key key, found_key;
6003         struct btrfs_path *path;
6004         struct extent_buffer *leaf;
6005         int ret;
6006
6007         key.objectid = btrfs_ino(inode);
6008         key.type = BTRFS_DIR_INDEX_KEY;
6009         key.offset = (u64)-1;
6010
6011         path = btrfs_alloc_path();
6012         if (!path)
6013                 return -ENOMEM;
6014
6015         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6016         if (ret < 0)
6017                 goto out;
6018         /* FIXME: we should be able to handle this */
6019         if (ret == 0)
6020                 goto out;
6021         ret = 0;
6022
6023         /*
6024          * MAGIC NUMBER EXPLANATION:
6025          * since we search a directory based on f_pos, and '.' and '..'
6026          * have f_pos of 0 and 1 respectively, every other entry has to
6027          * start at 2
6028          */
6029         if (path->slots[0] == 0) {
6030                 BTRFS_I(inode)->index_cnt = 2;
6031                 goto out;
6032         }
6033
6034         path->slots[0]--;
6035
6036         leaf = path->nodes[0];
6037         btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6038
6039         if (found_key.objectid != btrfs_ino(inode) ||
6040             found_key.type != BTRFS_DIR_INDEX_KEY) {
6041                 BTRFS_I(inode)->index_cnt = 2;
6042                 goto out;
6043         }
6044
6045         BTRFS_I(inode)->index_cnt = found_key.offset + 1;
6046 out:
6047         btrfs_free_path(path);
6048         return ret;
6049 }
6050
6051 /*
6052  * helper to find a free sequence number in a given directory.  The current
6053  * code is very simple; later versions will do smarter things in the btree
6054  */
6055 int btrfs_set_inode_index(struct inode *dir, u64 *index)
6056 {
6057         int ret = 0;
6058
6059         if (BTRFS_I(dir)->index_cnt == (u64)-1) {
6060                 ret = btrfs_inode_delayed_dir_index_count(dir);
6061                 if (ret) {
6062                         ret = btrfs_set_inode_index_count(dir);
6063                         if (ret)
6064                                 return ret;
6065                 }
6066         }
6067
6068         *index = BTRFS_I(dir)->index_cnt;
6069         BTRFS_I(dir)->index_cnt++;
6070
6071         return ret;
6072 }
6073
6074 static int btrfs_insert_inode_locked(struct inode *inode)
6075 {
6076         struct btrfs_iget_args args;
6077         args.location = &BTRFS_I(inode)->location;
6078         args.root = BTRFS_I(inode)->root;
6079
6080         return insert_inode_locked4(inode,
6081                    btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
6082                    btrfs_find_actor, &args);
6083 }
6084
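/*
 * Allocate a new in-core inode and insert the matching inode item,
 * plus an inode ref when a name is given (i.e. everything except
 * O_TMPFILE), into the tree in one batched btree insertion.
 */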
6085 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
6086                                      struct btrfs_root *root,
6087                                      struct inode *dir,
6088                                      const char *name, int name_len,
6089                                      u64 ref_objectid, u64 objectid,
6090                                      umode_t mode, u64 *index)
6091 {
6092         struct inode *inode;
6093         struct btrfs_inode_item *inode_item;
6094         struct btrfs_key *location;
6095         struct btrfs_path *path;
6096         struct btrfs_inode_ref *ref;
6097         struct btrfs_key key[2];
6098         u32 sizes[2];
6099         int nitems = name ? 2 : 1;
6100         unsigned long ptr;
6101         int ret;
6102
6103         path = btrfs_alloc_path();
6104         if (!path)
6105                 return ERR_PTR(-ENOMEM);
6106
6107         inode = new_inode(root->fs_info->sb);
6108         if (!inode) {
6109                 btrfs_free_path(path);
6110                 return ERR_PTR(-ENOMEM);
6111         }
6112
6113         /*
6114          * For O_TMPFILE, set the link count to 0 so that from this point
6115          * on we fill in an inode item with the correct link count.
6116          */
6117         if (!name)
6118                 set_nlink(inode, 0);
6119
6120         /*
6121          * we have to initialize this early, so we can reclaim the inode
6122          * number if we fail afterwards in this function.
6123          */
6124         inode->i_ino = objectid;
6125
6126         if (dir && name) {
6127                 trace_btrfs_inode_request(dir);
6128
6129                 ret = btrfs_set_inode_index(dir, index);
6130                 if (ret) {
6131                         btrfs_free_path(path);
6132                         iput(inode);
6133                         return ERR_PTR(ret);
6134                 }
6135         } else if (dir) {
6136                 *index = 0;
6137         }
6138         /*
6139          * index_cnt is ignored for everything but a dir;
6140          * btrfs_set_inode_index_count has an explanation for the magic
6141          * number
6142          */
6143         BTRFS_I(inode)->index_cnt = 2;
6144         BTRFS_I(inode)->dir_index = *index;
6145         BTRFS_I(inode)->root = root;
6146         BTRFS_I(inode)->generation = trans->transid;
6147         inode->i_generation = BTRFS_I(inode)->generation;
6148
6149         /*
6150          * We could have gotten an inode number from somebody who was fsynced
6151          * and then removed in this same transaction, so let's just set full
6152          * sync since it will be a full sync anyway and this will blow away the
6153          * old info in the log.
6154          */
6155         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
6156
6157         key[0].objectid = objectid;
6158         key[0].type = BTRFS_INODE_ITEM_KEY;
6159         key[0].offset = 0;
6160
6161         sizes[0] = sizeof(struct btrfs_inode_item);
6162
6163         if (name) {
6164                 /*
6165                  * Start new inodes with an inode_ref. This is slightly more
6166                  * efficient for small numbers of hard links since they will
6167                  * be packed into one item. Extended refs will kick in if we
6168                  * add more hard links than can fit in the ref item.
6169                  */
6170                 key[1].objectid = objectid;
6171                 key[1].type = BTRFS_INODE_REF_KEY;
6172                 key[1].offset = ref_objectid;
6173
6174                 sizes[1] = name_len + sizeof(*ref);
6175         }
6176
6177         location = &BTRFS_I(inode)->location;
6178         location->objectid = objectid;
6179         location->offset = 0;
6180         location->type = BTRFS_INODE_ITEM_KEY;
6181
6182         ret = btrfs_insert_inode_locked(inode);
6183         if (ret < 0)
6184                 goto fail;
6185
6186         path->leave_spinning = 1;
6187         ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems);
6188         if (ret != 0)
6189                 goto fail_unlock;
6190
6191         inode_init_owner(inode, dir, mode);
6192         inode_set_bytes(inode, 0);
6193
6194         inode->i_mtime = CURRENT_TIME;
6195         inode->i_atime = inode->i_mtime;
6196         inode->i_ctime = inode->i_mtime;
6197         BTRFS_I(inode)->i_otime = inode->i_mtime;
6198
6199         inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
6200                                   struct btrfs_inode_item);
6201         memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item,
6202                              sizeof(*inode_item));
6203         fill_inode_item(trans, path->nodes[0], inode_item, inode);
6204
6205         if (name) {
6206                 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
6207                                      struct btrfs_inode_ref);
6208                 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
6209                 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
6210                 ptr = (unsigned long)(ref + 1);
6211                 write_extent_buffer(path->nodes[0], name, ptr, name_len);
6212         }
6213
6214         btrfs_mark_buffer_dirty(path->nodes[0]);
6215         btrfs_free_path(path);
6216
6217         btrfs_inherit_iflags(inode, dir);
6218
6219         if (S_ISREG(mode)) {
6220                 if (btrfs_test_opt(root, NODATASUM))
6221                         BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6222                 if (btrfs_test_opt(root, NODATACOW))
6223                         BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
6224                                 BTRFS_INODE_NODATASUM;
6225         }
6226
6227         inode_tree_add(inode);
6228
6229         trace_btrfs_inode_new(inode);
6230         btrfs_set_inode_last_trans(trans, inode);
6231
6232         btrfs_update_root_times(trans, root);
6233
6234         ret = btrfs_inode_inherit_props(trans, inode, dir);
6235         if (ret)
6236                 btrfs_err(root->fs_info,
6237                           "error inheriting props for ino %llu (root %llu): %d",
6238                           btrfs_ino(inode), root->root_key.objectid, ret);
6239
6240         return inode;
6241
6242 fail_unlock:
6243         unlock_new_inode(inode);
6244 fail:
6245         if (dir && name)
6246                 BTRFS_I(dir)->index_cnt--;
6247         btrfs_free_path(path);
6248         iput(inode);
6249         return ERR_PTR(ret);
6250 }
6251
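/* map a VFS i_mode to the matching BTRFS_FT_* dir entry type */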
6252 static inline u8 btrfs_inode_type(struct inode *inode)
6253 {
6254         return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
6255 }
6256
6257 /*
6258  * utility function to add 'inode' into 'parent_inode' with
6259  * a given name and a given sequence number.
6260  * if 'add_backref' is true, also insert a backref from the
6261  * inode to the parent directory.
6262  */
6263 int btrfs_add_link(struct btrfs_trans_handle *trans,
6264                    struct inode *parent_inode, struct inode *inode,
6265                    const char *name, int name_len, int add_backref, u64 index)
6266 {
6267         int ret = 0;
6268         struct btrfs_key key;
6269         struct btrfs_root *root = BTRFS_I(parent_inode)->root;
6270         u64 ino = btrfs_ino(inode);
6271         u64 parent_ino = btrfs_ino(parent_inode);
6272
6273         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6274                 memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
6275         } else {
6276                 key.objectid = ino;
6277                 key.type = BTRFS_INODE_ITEM_KEY;
6278                 key.offset = 0;
6279         }
6280
6281         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6282                 ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
6283                                          key.objectid, root->root_key.objectid,
6284                                          parent_ino, index, name, name_len);
6285         } else if (add_backref) {
6286                 ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
6287                                              parent_ino, index);
6288         }
6289
6290         /* Nothing to clean up yet */
6291         if (ret)
6292                 return ret;
6293
6294         ret = btrfs_insert_dir_item(trans, root, name, name_len,
6295                                     parent_inode, &key,
6296                                     btrfs_inode_type(inode), index);
6297         if (ret == -EEXIST || ret == -EOVERFLOW)
6298                 goto fail_dir_item;
6299         else if (ret) {
6300                 btrfs_abort_transaction(trans, root, ret);
6301                 return ret;
6302         }
6303
6304         btrfs_i_size_write(parent_inode, parent_inode->i_size +
6305                            name_len * 2);
6306         inode_inc_iversion(parent_inode);
6307         parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
6308         ret = btrfs_update_inode(trans, root, parent_inode);
6309         if (ret)
6310                 btrfs_abort_transaction(trans, root, ret);
6311         return ret;
6312
6313 fail_dir_item:
6314         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6315                 u64 local_index;
6316                 int err;
6317                 err = btrfs_del_root_ref(trans, root->fs_info->tree_root,
6318                                  key.objectid, root->root_key.objectid,
6319                                  parent_ino, &local_index, name, name_len);
6320
6321         } else if (add_backref) {
6322                 u64 local_index;
6323                 int err;
6324
6325                 err = btrfs_del_inode_ref(trans, root, name, name_len,
6326                                           ino, parent_ino, &local_index);
6327         }
6328         return ret;
6329 }
6330
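/*
 * Thin wrapper around btrfs_add_link() for non-directories; a
 * positive return from btrfs_add_link() means the name already
 * existed.
 */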
6331 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
6332                             struct inode *dir, struct dentry *dentry,
6333                             struct inode *inode, int backref, u64 index)
6334 {
6335         int err = btrfs_add_link(trans, dir, inode,
6336                                  dentry->d_name.name, dentry->d_name.len,
6337                                  backref, index);
6338         if (err > 0)
6339                 err = -EEXIST;
6340         return err;
6341 }
6342
6343 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
6344                         umode_t mode, dev_t rdev)
6345 {
6346         struct btrfs_trans_handle *trans;
6347         struct btrfs_root *root = BTRFS_I(dir)->root;
6348         struct inode *inode = NULL;
6349         int err;
6350         int drop_inode = 0;
6351         u64 objectid;
6352         u64 index = 0;
6353
6354         /*
6355          * 2 for inode item and ref
6356          * 2 for dir items
6357          * 1 for xattr if selinux is on
6358          */
6359         trans = btrfs_start_transaction(root, 5);
6360         if (IS_ERR(trans))
6361                 return PTR_ERR(trans);
6362
6363         err = btrfs_find_free_ino(root, &objectid);
6364         if (err)
6365                 goto out_unlock;
6366
6367         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6368                                 dentry->d_name.len, btrfs_ino(dir), objectid,
6369                                 mode, &index);
6370         if (IS_ERR(inode)) {
6371                 err = PTR_ERR(inode);
6372                 goto out_unlock;
6373         }
6374
6375         /*
6376          * If the active LSM wants to access the inode during
6377          * d_instantiate it needs these. Smack checks to see
6378          * if the filesystem supports xattrs by looking at the
6379          * ops vector.
6380          */
6381         inode->i_op = &btrfs_special_inode_operations;
6382         init_special_inode(inode, inode->i_mode, rdev);
6383
6384         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6385         if (err)
6386                 goto out_unlock_inode;
6387
6388         err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
6389         if (err) {
6390                 goto out_unlock_inode;
6391         } else {
6392                 btrfs_update_inode(trans, root, inode);
6393                 unlock_new_inode(inode);
6394                 d_instantiate(dentry, inode);
6395         }
6396
6397 out_unlock:
6398         btrfs_end_transaction(trans, root);
6399         btrfs_balance_delayed_items(root);
6400         btrfs_btree_balance_dirty(root);
6401         if (drop_inode) {
6402                 inode_dec_link_count(inode);
6403                 iput(inode);
6404         }
6405         return err;
6406
6407 out_unlock_inode:
6408         drop_inode = 1;
6409         unlock_new_inode(inode);
6410         goto out_unlock;
6411
6412 }
6413
6414 static int btrfs_create(struct inode *dir, struct dentry *dentry,
6415                         umode_t mode, bool excl)
6416 {
6417         struct btrfs_trans_handle *trans;
6418         struct btrfs_root *root = BTRFS_I(dir)->root;
6419         struct inode *inode = NULL;
6420         int drop_inode_on_err = 0;
6421         int err;
6422         u64 objectid;
6423         u64 index = 0;
6424
6425         /*
6426          * 2 for inode item and ref
6427          * 2 for dir items
6428          * 1 for xattr if selinux is on
6429          */
6430         trans = btrfs_start_transaction(root, 5);
6431         if (IS_ERR(trans))
6432                 return PTR_ERR(trans);
6433
6434         err = btrfs_find_free_ino(root, &objectid);
6435         if (err)
6436                 goto out_unlock;
6437
6438         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6439                                 dentry->d_name.len, btrfs_ino(dir), objectid,
6440                                 mode, &index);
6441         if (IS_ERR(inode)) {
6442                 err = PTR_ERR(inode);
6443                 goto out_unlock;
6444         }
6445         drop_inode_on_err = 1;
6446         /*
6447          * If the active LSM wants to access the inode during
6448          * d_instantiate it needs these. Smack checks to see
6449          * if the filesystem supports xattrs by looking at the
6450          * ops vector.
6451          */
6452         inode->i_fop = &btrfs_file_operations;
6453         inode->i_op = &btrfs_file_inode_operations;
6454         inode->i_mapping->a_ops = &btrfs_aops;
6455
6456         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6457         if (err)
6458                 goto out_unlock_inode;
6459
6460         err = btrfs_update_inode(trans, root, inode);
6461         if (err)
6462                 goto out_unlock_inode;
6463
6464         err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
6465         if (err)
6466                 goto out_unlock_inode;
6467
6468         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
6469         unlock_new_inode(inode);
6470         d_instantiate(dentry, inode);
6471
6472 out_unlock:
6473         btrfs_end_transaction(trans, root);
6474         if (err && drop_inode_on_err) {
6475                 inode_dec_link_count(inode);
6476                 iput(inode);
6477         }
6478         btrfs_balance_delayed_items(root);
6479         btrfs_btree_balance_dirty(root);
6480         return err;
6481
6482 out_unlock_inode:
6483         unlock_new_inode(inode);
6484         goto out_unlock;
6485
6486 }
6487
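/*
 * Add a hard link.  The dir index is reserved before the transaction
 * starts, and linking an O_TMPFILE inode (nlink going 0 -> 1) also
 * removes its orphan item.
 */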
6488 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
6489                       struct dentry *dentry)
6490 {
6491         struct btrfs_trans_handle *trans;
6492         struct btrfs_root *root = BTRFS_I(dir)->root;
6493         struct inode *inode = d_inode(old_dentry);
6494         u64 index;
6495         int err;
6496         int drop_inode = 0;
6497
6498         /* do not allow hard links across subvolumes of the same device */
6499         if (root->objectid != BTRFS_I(inode)->root->objectid)
6500                 return -EXDEV;
6501
6502         if (inode->i_nlink >= BTRFS_LINK_MAX)
6503                 return -EMLINK;
6504
6505         err = btrfs_set_inode_index(dir, &index);
6506         if (err)
6507                 goto fail;
6508
6509         /*
6510          * 2 items for inode and inode ref
6511          * 2 items for dir items
6512          * 1 item for parent inode
6513          */
6514         trans = btrfs_start_transaction(root, 5);
6515         if (IS_ERR(trans)) {
6516                 err = PTR_ERR(trans);
6517                 goto fail;
6518         }
6519
6520         /* There are several dir indexes for this inode, clear the cache. */
6521         BTRFS_I(inode)->dir_index = 0ULL;
6522         inc_nlink(inode);
6523         inode_inc_iversion(inode);
6524         inode->i_ctime = CURRENT_TIME;
6525         ihold(inode);
6526         set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
6527
6528         err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
6529
6530         if (err) {
6531                 drop_inode = 1;
6532         } else {
6533                 struct dentry *parent = dentry->d_parent;
6534                 err = btrfs_update_inode(trans, root, inode);
6535                 if (err)
6536                         goto fail;
6537                 if (inode->i_nlink == 1) {
6538                         /*
6539                          * If the new hard link count is 1, it's a file created
6540                          * with open(2) O_TMPFILE flag.
6541                          */
6542                         err = btrfs_orphan_del(trans, inode);
6543                         if (err)
6544                                 goto fail;
6545                 }
6546                 d_instantiate(dentry, inode);
6547                 btrfs_log_new_name(trans, inode, NULL, parent);
6548         }
6549
6550         btrfs_end_transaction(trans, root);
6551         btrfs_balance_delayed_items(root);
6552 fail:
6553         if (drop_inode) {
6554                 inode_dec_link_count(inode);
6555                 iput(inode);
6556         }
6557         btrfs_btree_balance_dirty(root);
6558         return err;
6559 }
6560
6561 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
6562 {
6563         struct inode *inode = NULL;
6564         struct btrfs_trans_handle *trans;
6565         struct btrfs_root *root = BTRFS_I(dir)->root;
6566         int err = 0;
6567         int drop_on_err = 0;
6568         u64 objectid = 0;
6569         u64 index = 0;
6570
6571         /*
6572          * 2 items for inode and ref
6573          * 2 items for dir items
6574          * 1 for xattr if selinux is on
6575          */
6576         trans = btrfs_start_transaction(root, 5);
6577         if (IS_ERR(trans))
6578                 return PTR_ERR(trans);
6579
6580         err = btrfs_find_free_ino(root, &objectid);
6581         if (err)
6582                 goto out_fail;
6583
6584         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6585                                 dentry->d_name.len, btrfs_ino(dir), objectid,
6586                                 S_IFDIR | mode, &index);
6587         if (IS_ERR(inode)) {
6588                 err = PTR_ERR(inode);
6589                 goto out_fail;
6590         }
6591
6592         drop_on_err = 1;
6593         /* these must be set before we unlock the inode */
6594         inode->i_op = &btrfs_dir_inode_operations;
6595         inode->i_fop = &btrfs_dir_file_operations;
6596
6597         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6598         if (err)
6599                 goto out_fail_inode;
6600
6601         btrfs_i_size_write(inode, 0);
6602         err = btrfs_update_inode(trans, root, inode);
6603         if (err)
6604                 goto out_fail_inode;
6605
6606         err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
6607                              dentry->d_name.len, 0, index);
6608         if (err)
6609                 goto out_fail_inode;
6610
6611         d_instantiate(dentry, inode);
6612         /*
6613          * mkdir is special.  We're unlocking after we call d_instantiate
6614          * to avoid a race with nfsd calling d_instantiate.
6615          */
6616         unlock_new_inode(inode);
6617         drop_on_err = 0;
6618
6619 out_fail:
6620         btrfs_end_transaction(trans, root);
6621         if (drop_on_err) {
6622                 inode_dec_link_count(inode);
6623                 iput(inode);
6624         }
6625         btrfs_balance_delayed_items(root);
6626         btrfs_btree_balance_dirty(root);
6627         return err;
6628
6629 out_fail_inode:
6630         unlock_new_inode(inode);
6631         goto out_fail;
6632 }
6633
6634 /* Find the next extent map after a given one; the caller must hold the tree lock */
6635 static struct extent_map *next_extent_map(struct extent_map *em)
6636 {
6637         struct rb_node *next;
6638
6639         next = rb_next(&em->rb_node);
6640         if (!next)
6641                 return NULL;
6642         return container_of(next, struct extent_map, rb_node);
6643 }
6644
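/* Same as next_extent_map() but walking backwards; same locking rules */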
6645 static struct extent_map *prev_extent_map(struct extent_map *em)
6646 {
6647         struct rb_node *prev;
6648
6649         prev = rb_prev(&em->rb_node);
6650         if (!prev)
6651                 return NULL;
6652         return container_of(prev, struct extent_map, rb_node);
6653 }
6654
6655 /* helper for btfs_get_extent.  Given an existing extent in the tree,
6656  * the existing extent is the nearest extent to map_start,
6657  * and an extent that you want to insert, deal with overlap and insert
6658  * the best fitted new extent into the tree.
6659  */
6660 static int merge_extent_mapping(struct extent_map_tree *em_tree,
6661                                 struct extent_map *existing,
6662                                 struct extent_map *em,
6663                                 u64 map_start)
6664 {
6665         struct extent_map *prev;
6666         struct extent_map *next;
6667         u64 start;
6668         u64 end;
6669         u64 start_diff;
6670
6671         BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
6672
6673         if (existing->start > map_start) {
6674                 next = existing;
6675                 prev = prev_extent_map(next);
6676         } else {
6677                 prev = existing;
6678                 next = next_extent_map(prev);
6679         }
6680
6681         start = prev ? extent_map_end(prev) : em->start;
6682         start = max_t(u64, start, em->start);
6683         end = next ? next->start : extent_map_end(em);
6684         end = min_t(u64, end, extent_map_end(em));
6685         start_diff = start - em->start;
6686         em->start = start;
6687         em->len = end - start;
6688         if (em->block_start < EXTENT_MAP_LAST_BYTE &&
6689             !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
6690                 em->block_start += start_diff;
6691                 em->block_len -= start_diff;
6692         }
6693         return add_extent_mapping(em_tree, em, 0);
6694 }
6695
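/*
 * Read a compressed inline extent from the leaf and decompress it
 * into the given page.
 */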
6696 static noinline int uncompress_inline(struct btrfs_path *path,
6697                                       struct inode *inode, struct page *page,
6698                                       size_t pg_offset, u64 extent_offset,
6699                                       struct btrfs_file_extent_item *item)
6700 {
6701         int ret;
6702         struct extent_buffer *leaf = path->nodes[0];
6703         char *tmp;
6704         size_t max_size;
6705         unsigned long inline_size;
6706         unsigned long ptr;
6707         int compress_type;
6708
6709         WARN_ON(pg_offset != 0);
6710         compress_type = btrfs_file_extent_compression(leaf, item);
6711         max_size = btrfs_file_extent_ram_bytes(leaf, item);
6712         inline_size = btrfs_file_extent_inline_item_len(leaf,
6713                                         btrfs_item_nr(path->slots[0]));
6714         tmp = kmalloc(inline_size, GFP_NOFS);
6715         if (!tmp)
6716                 return -ENOMEM;
6717         ptr = btrfs_file_extent_inline_start(item);
6718
6719         read_extent_buffer(leaf, tmp, ptr, inline_size);
6720
6721         max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
6722         ret = btrfs_decompress(compress_type, tmp, page,
6723                                extent_offset, inline_size, max_size);
6724         kfree(tmp);
6725         return ret;
6726 }
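
/*
 * Annotation (not part of the original source): an inline extent keeps
 * the file data directly in the b-tree leaf, so the copy above reads
 * the on-disk (possibly compressed) payload:
 *
 *	inline_size = btrfs_file_extent_inline_item_len(...) - stored bytes
 *	max_size    = btrfs_file_extent_ram_bytes(...)        - decompressed
 *
 * max_size is clamped to PAGE_CACHE_SIZE because inline extents only
 * exist for data that fits in the file's first page (hence the
 * WARN_ON(pg_offset != 0) above).
 */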
6727
6728 /*
6729  * a bit scary, this does extent mapping from logical file offset to the disk.
6730  * the ugly parts come from merging extents from the disk with the in-ram
6731  * representation.  This gets more complex because of the data=ordered code,
6732  * where the in-ram extents might be locked pending data=ordered completion.
6733  *
6734  * This also copies inline extents directly into the page.
6735  */
6736
6737 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
6738                                     size_t pg_offset, u64 start, u64 len,
6739                                     int create)
6740 {
6741         int ret;
6742         int err = 0;
6743         u64 extent_start = 0;
6744         u64 extent_end = 0;
6745         u64 objectid = btrfs_ino(inode);
6746         u32 found_type;
6747         struct btrfs_path *path = NULL;
6748         struct btrfs_root *root = BTRFS_I(inode)->root;
6749         struct btrfs_file_extent_item *item;
6750         struct extent_buffer *leaf;
6751         struct btrfs_key found_key;
6752         struct extent_map *em = NULL;
6753         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
6754         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
6755         struct btrfs_trans_handle *trans = NULL;
6756         const bool new_inline = !page || create;
6757
6758 again:
6759         read_lock(&em_tree->lock);
6760         em = lookup_extent_mapping(em_tree, start, len);
6761         if (em)
6762                 em->bdev = root->fs_info->fs_devices->latest_bdev;
6763         read_unlock(&em_tree->lock);
6764
6765         if (em) {
6766                 if (em->start > start || em->start + em->len <= start)
6767                         free_extent_map(em);
6768                 else if (em->block_start == EXTENT_MAP_INLINE && page)
6769                         free_extent_map(em);
6770                 else
6771                         goto out;
6772         }
6773         em = alloc_extent_map();
6774         if (!em) {
6775                 err = -ENOMEM;
6776                 goto out;
6777         }
6778         em->bdev = root->fs_info->fs_devices->latest_bdev;
6779         em->start = EXTENT_MAP_HOLE;
6780         em->orig_start = EXTENT_MAP_HOLE;
6781         em->len = (u64)-1;
6782         em->block_len = (u64)-1;
6783
6784         if (!path) {
6785                 path = btrfs_alloc_path();
6786                 if (!path) {
6787                         err = -ENOMEM;
6788                         goto out;
6789                 }
6790                 /*
6791                  * Chances are we'll be called again, so go ahead and do
6792                  * readahead
6793                  */
6794                 path->reada = 1;
6795         }
6796
6797         ret = btrfs_lookup_file_extent(trans, root, path,
6798                                        objectid, start, trans != NULL);
6799         if (ret < 0) {
6800                 err = ret;
6801                 goto out;
6802         }
6803
6804         if (ret != 0) {
6805                 if (path->slots[0] == 0)
6806                         goto not_found;
6807                 path->slots[0]--;
6808         }
6809
6810         leaf = path->nodes[0];
6811         item = btrfs_item_ptr(leaf, path->slots[0],
6812                               struct btrfs_file_extent_item);
6813         /* are we inside the extent that was found? */
6814         btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6815         found_type = found_key.type;
6816         if (found_key.objectid != objectid ||
6817             found_type != BTRFS_EXTENT_DATA_KEY) {
6818                 /*
6819                  * If we back up past the first extent we want to move forward
6820                  * and see if there is an extent in front of us; otherwise we'll
6821                  * say there is a hole for our whole search range, which can
6822                  * cause problems.
6823                  */
6824                 extent_end = start;
6825                 goto next;
6826         }
6827
6828         found_type = btrfs_file_extent_type(leaf, item);
6829         extent_start = found_key.offset;
6830         if (found_type == BTRFS_FILE_EXTENT_REG ||
6831             found_type == BTRFS_FILE_EXTENT_PREALLOC) {
6832                 extent_end = extent_start +
6833                        btrfs_file_extent_num_bytes(leaf, item);
6834         } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
6835                 size_t size;
6836                 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
6837                 extent_end = ALIGN(extent_start + size, root->sectorsize);
6838         }
6839 next:
6840         if (start >= extent_end) {
6841                 path->slots[0]++;
6842                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
6843                         ret = btrfs_next_leaf(root, path);
6844                         if (ret < 0) {
6845                                 err = ret;
6846                                 goto out;
6847                         }
6848                         if (ret > 0)
6849                                 goto not_found;
6850                         leaf = path->nodes[0];
6851                 }
6852                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6853                 if (found_key.objectid != objectid ||
6854                     found_key.type != BTRFS_EXTENT_DATA_KEY)
6855                         goto not_found;
6856                 if (start + len <= found_key.offset)
6857                         goto not_found;
6858                 if (start > found_key.offset)
6859                         goto next;
6860                 em->start = start;
6861                 em->orig_start = start;
6862                 em->len = found_key.offset - start;
6863                 goto not_found_em;
6864         }
6865
6866         btrfs_extent_item_to_extent_map(inode, path, item, new_inline, em);
6867
6868         if (found_type == BTRFS_FILE_EXTENT_REG ||
6869             found_type == BTRFS_FILE_EXTENT_PREALLOC) {
6870                 goto insert;
6871         } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
6872                 unsigned long ptr;
6873                 char *map;
6874                 size_t size;
6875                 size_t extent_offset;
6876                 size_t copy_size;
6877
6878                 if (new_inline)
6879                         goto out;
6880
6881                 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
6882                 extent_offset = page_offset(page) + pg_offset - extent_start;
6883                 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
6884                                 size - extent_offset);
6885                 em->start = extent_start + extent_offset;
6886                 em->len = ALIGN(copy_size, root->sectorsize);
6887                 em->orig_block_len = em->len;
6888                 em->orig_start = em->start;
6889                 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
6890                 if (create == 0 && !PageUptodate(page)) {
6891                         if (btrfs_file_extent_compression(leaf, item) !=
6892                             BTRFS_COMPRESS_NONE) {
6893                                 ret = uncompress_inline(path, inode, page,
6894                                                         pg_offset,
6895                                                         extent_offset, item);
6896                                 if (ret) {
6897                                         err = ret;
6898                                         goto out;
6899                                 }
6900                         } else {
6901                                 map = kmap(page);
6902                                 read_extent_buffer(leaf, map + pg_offset, ptr,
6903                                                    copy_size);
6904                                 if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
6905                                         memset(map + pg_offset + copy_size, 0,
6906                                                PAGE_CACHE_SIZE - pg_offset -
6907                                                copy_size);
6908                                 }
6909                                 kunmap(page);
6910                         }
6911                         flush_dcache_page(page);
6912                 } else if (create && PageUptodate(page)) {
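                        /*
                         * Annotation (not part of the original source):
                         * this branch is not expected to be taken, and
                         * with BUG() compiled in everything below it in
                         * this block is effectively dead code.
                         */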
6913                         BUG();
6914                         if (!trans) {
6915                                 kunmap(page);
6916                                 free_extent_map(em);
6917                                 em = NULL;
6918
6919                                 btrfs_release_path(path);
6920                                 trans = btrfs_join_transaction(root);
6921
6922                                 if (IS_ERR(trans))
6923                                         return ERR_CAST(trans);
6924                                 goto again;
6925                         }
6926                         map = kmap(page);
6927                         write_extent_buffer(leaf, map + pg_offset, ptr,
6928                                             copy_size);
6929                         kunmap(page);
6930                         btrfs_mark_buffer_dirty(leaf);
6931                 }
6932                 set_extent_uptodate(io_tree, em->start,
6933                                     extent_map_end(em) - 1, NULL, GFP_NOFS);
6934                 goto insert;
6935         }
6936 not_found:
6937         em->start = start;
6938         em->orig_start = start;
6939         em->len = len;
6940 not_found_em:
6941         em->block_start = EXTENT_MAP_HOLE;
6942         set_bit(EXTENT_FLAG_VACANCY, &em->flags);
6943 insert:
6944         btrfs_release_path(path);
6945         if (em->start > start || extent_map_end(em) <= start) {
6946                 btrfs_err(root->fs_info, "bad extent! em: [%llu %llu] passed [%llu %llu]",
6947                         em->start, em->len, start, len);
6948                 err = -EIO;
6949                 goto out;
6950         }
6951
6952         err = 0;
6953         write_lock(&em_tree->lock);
6954         ret = add_extent_mapping(em_tree, em, 0);
6955         /* it is possible that someone inserted the extent into the tree
6956          * while we had the lock dropped.  It is also possible that
6957          * an overlapping map exists in the tree
6958          */
6959         if (ret == -EEXIST) {
6960                 struct extent_map *existing;
6961
6962                 ret = 0;
6963
6964                 existing = search_extent_mapping(em_tree, start, len);
6965                 /*
6966                  * existing will always be non-NULL, since there must be an
6967                  * extent causing the -EEXIST.
6968                  */
6969                 if (start >= extent_map_end(existing) ||
6970                     start <= existing->start) {
6971                         /*
6972                          * The existing extent map is the one nearest to
6973                          * the [start, start + len) range which overlaps it.
6974                          */
6975                         err = merge_extent_mapping(em_tree, existing,
6976                                                    em, start);
6977                         free_extent_map(existing);
6978                         if (err) {
6979                                 free_extent_map(em);
6980                                 em = NULL;
6981                         }
6982                 } else {
6983                         free_extent_map(em);
6984                         em = existing;
6985                         err = 0;
6986                 }
6987         }
6988         write_unlock(&em_tree->lock);
6989 out:
6990
6991         trace_btrfs_get_extent(root, em);
6992
6993         btrfs_free_path(path);
6994         if (trans) {
6995                 ret = btrfs_end_transaction(trans, root);
6996                 if (!err)
6997                         err = ret;
6998         }
6999         if (err) {
7000                 free_extent_map(em);
7001                 return ERR_PTR(err);
7002         }
7003         BUG_ON(!em); /* Error is always set */
7004         return em;
7005 }
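
/*
 * Usage sketch (annotation, not part of the original source; error
 * handling elided and names illustrative): a read-side caller maps a
 * file offset to a disk byte number roughly like this:
 *
 *	struct extent_map *em;
 *
 *	em = btrfs_get_extent(inode, page, 0, start, len, 0);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	if (em->block_start == EXTENT_MAP_HOLE)
 *		... zero-fill, nothing on disk ...
 *	else
 *		disk_bytenr = em->block_start + (start - em->start);
 *	free_extent_map(em);
 */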
7006
7007 struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
7008                                            size_t pg_offset, u64 start, u64 len,
7009                                            int create)
7010 {
7011         struct extent_map *em;
7012         struct extent_map *hole_em = NULL;
7013         u64 range_start = start;
7014         u64 end;
7015         u64 found;
7016         u64 found_end;
7017         int err = 0;
7018
7019         em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
7020         if (IS_ERR(em))
7021                 return em;
7022         if (em) {
7023                 /*
7024                  * if our em maps to
7025                  * -  a hole or
7026                  * -  a pre-alloc extent,
7027                  * there might actually be delalloc bytes behind it.
7028                  */
7029                 if (em->block_start != EXTENT_MAP_HOLE &&
7030                     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7031                         return em;
7032                 else
7033                         hole_em = em;
7034         }
7035
7036         /* check to see if we've wrapped (len == -1 or similar) */
7037         end = start + len;
7038         if (end < start)
7039                 end = (u64)-1;
7040         else
7041                 end -= 1;
7042
7043         em = NULL;
7044
7045         /* ok, we didn't find anything, let's look for delalloc */
7046         found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
7047                                  end, len, EXTENT_DELALLOC, 1);
7048         found_end = range_start + found;
7049         if (found_end < range_start)
7050                 found_end = (u64)-1;
7051
7052         /*
7053          * we didn't find anything useful, return
7054          * the original results from get_extent()
7055          */
7056         if (range_start > end || found_end <= start) {
7057                 em = hole_em;
7058                 hole_em = NULL;
7059                 goto out;
7060         }
7061
7062         /* adjust the range_start to make sure it doesn't
7063          * go backwards from the start they passed in
7064          */
7065         range_start = max(start, range_start);
7066         found = found_end - range_start;
7067
7068         if (found > 0) {
7069                 u64 hole_start = start;
7070                 u64 hole_len = len;
7071
7072                 em = alloc_extent_map();
7073                 if (!em) {
7074                         err = -ENOMEM;
7075                         goto out;
7076                 }
7077                 /*
7078                  * when btrfs_get_extent can't find anything it
7079                  * returns one huge hole
7080                  *
7081                  * make sure what it found really fits our range, and
7082                  * adjust to make sure it is based on the start from
7083                  * the caller
7084                  */
7085                 if (hole_em) {
7086                         u64 calc_end = extent_map_end(hole_em);
7087
7088                         if (calc_end <= start || (hole_em->start > end)) {
7089                                 free_extent_map(hole_em);
7090                                 hole_em = NULL;
7091                         } else {
7092                                 hole_start = max(hole_em->start, start);
7093                                 hole_len = calc_end - hole_start;
7094                         }
7095                 }
7096                 em->bdev = NULL;
7097                 if (hole_em && range_start > hole_start) {
7098                         /* our hole starts before our delalloc, so we
7099                          * have to return just the parts of the hole
7100                          * that go until the delalloc starts
7101                          */
7102                         em->len = min(hole_len,
7103                                       range_start - hole_start);
7104                         em->start = hole_start;
7105                         em->orig_start = hole_start;
7106                         /*
7107                          * don't adjust block start at all,
7108                          * it is fixed at EXTENT_MAP_HOLE
7109                          */
7110                         em->block_start = hole_em->block_start;
7111                         em->block_len = hole_len;
7112                         if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
7113                                 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
7114                 } else {
7115                         em->start = range_start;
7116                         em->len = found;
7117                         em->orig_start = range_start;
7118                         em->block_start = EXTENT_MAP_DELALLOC;
7119                         em->block_len = found;
7120                 }
7121         } else if (hole_em) {
7122                 return hole_em;
7123         }
7124 out:
7125
7126         free_extent_map(hole_em);
7127         if (err) {
7128                 free_extent_map(em);
7129                 return ERR_PTR(err);
7130         }
7131         return em;
7132 }
7133
7134 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
7135                                                   u64 start, u64 len)
7136 {
7137         struct btrfs_root *root = BTRFS_I(inode)->root;
7138         struct extent_map *em;
7139         struct btrfs_key ins;
7140         u64 alloc_hint;
7141         int ret;
7142
7143         alloc_hint = get_extent_allocation_hint(inode, start, len);
7144         ret = btrfs_reserve_extent(root, len, root->sectorsize, 0,
7145                                    alloc_hint, &ins, 1, 1);
7146         if (ret)
7147                 return ERR_PTR(ret);
7148
7149         em = create_pinned_em(inode, start, ins.offset, start, ins.objectid,
7150                               ins.offset, ins.offset, ins.offset, 0);
7151         if (IS_ERR(em)) {
7152                 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
7153                 return em;
7154         }
7155
7156         ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
7157                                            ins.offset, ins.offset, 0);
7158         if (ret) {
7159                 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
7160                 free_extent_map(em);
7161                 return ERR_PTR(ret);
7162         }
7163
7164         return em;
7165 }
7166
7167 /*
7168  * returns 1 when the nocow is safe, < 0 on error, 0 if the
7169  * block must be cow'd
7170  */
7171 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
7172                               u64 *orig_start, u64 *orig_block_len,
7173                               u64 *ram_bytes)
7174 {
7175         struct btrfs_trans_handle *trans;
7176         struct btrfs_path *path;
7177         int ret;
7178         struct extent_buffer *leaf;
7179         struct btrfs_root *root = BTRFS_I(inode)->root;
7180         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7181         struct btrfs_file_extent_item *fi;
7182         struct btrfs_key key;
7183         u64 disk_bytenr;
7184         u64 backref_offset;
7185         u64 extent_end;
7186         u64 num_bytes;
7187         int slot;
7188         int found_type;
7189         bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW);
7190
7191         path = btrfs_alloc_path();
7192         if (!path)
7193                 return -ENOMEM;
7194
7195         ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
7196                                        offset, 0);
7197         if (ret < 0)
7198                 goto out;
7199
7200         slot = path->slots[0];
7201         if (ret == 1) {
7202                 if (slot == 0) {
7203                         /* can't find the item, must cow */
7204                         ret = 0;
7205                         goto out;
7206                 }
7207                 slot--;
7208         }
7209         ret = 0;
7210         leaf = path->nodes[0];
7211         btrfs_item_key_to_cpu(leaf, &key, slot);
7212         if (key.objectid != btrfs_ino(inode) ||
7213             key.type != BTRFS_EXTENT_DATA_KEY) {
7214                 /* not our file or wrong item type, must cow */
7215                 goto out;
7216         }
7217
7218         if (key.offset > offset) {
7219                 /* Wrong offset, must cow */
7220                 goto out;
7221         }
7222
7223         fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
7224         found_type = btrfs_file_extent_type(leaf, fi);
7225         if (found_type != BTRFS_FILE_EXTENT_REG &&
7226             found_type != BTRFS_FILE_EXTENT_PREALLOC) {
7227                 /* not a regular extent, must cow */
7228                 goto out;
7229         }
7230
7231         if (!nocow && found_type == BTRFS_FILE_EXTENT_REG)
7232                 goto out;
7233
7234         extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
7235         if (extent_end <= offset)
7236                 goto out;
7237
7238         disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
7239         if (disk_bytenr == 0)
7240                 goto out;
7241
7242         if (btrfs_file_extent_compression(leaf, fi) ||
7243             btrfs_file_extent_encryption(leaf, fi) ||
7244             btrfs_file_extent_other_encoding(leaf, fi))
7245                 goto out;
7246
7247         backref_offset = btrfs_file_extent_offset(leaf, fi);
7248
7249         if (orig_start) {
7250                 *orig_start = key.offset - backref_offset;
7251                 *orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
7252                 *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
7253         }
7254
7255         if (btrfs_extent_readonly(root, disk_bytenr))
7256                 goto out;
7257
7258         num_bytes = min(offset + *len, extent_end) - offset;
7259         if (!nocow && found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7260                 u64 range_end;
7261
7262                 range_end = round_up(offset + num_bytes, root->sectorsize) - 1;
7263                 ret = test_range_bit(io_tree, offset, range_end,
7264                                      EXTENT_DELALLOC, 0, NULL);
7265                 if (ret) {
7266                         ret = -EAGAIN;
7267                         goto out;
7268                 }
7269         }
7270
7271         btrfs_release_path(path);
7272
7273         /*
7274          * look for other files referencing this extent, if we
7275          * find any we must cow
7276          */
7277         trans = btrfs_join_transaction(root);
7278         if (IS_ERR(trans)) {
7279                 ret = 0;
7280                 goto out;
7281         }
7282
7283         ret = btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
7284                                     key.offset - backref_offset, disk_bytenr);
7285         btrfs_end_transaction(trans, root);
7286         if (ret) {
7287                 ret = 0;
7288                 goto out;
7289         }
7290
7291         /*
7292          * adjust disk_bytenr and num_bytes to cover just the bytes
7293          * in this extent we are about to write.  If there
7294          * are any csums in that range we have to cow in order
7295          * to keep the csums correct
7296          */
7297         disk_bytenr += backref_offset;
7298         disk_bytenr += offset - key.offset;
7299         if (csum_exist_in_range(root, disk_bytenr, num_bytes))
7300                 goto out;
7301         /*
7302          * all of the above have passed, it is safe to overwrite this extent
7303          * without cow
7304          */
7305         *len = num_bytes;
7306         ret = 1;
7307 out:
7308         btrfs_free_path(path);
7309         return ret;
7310 }
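
/*
 * Usage sketch (annotation, not part of the original source): the DIO
 * write path below calls this before overwriting in place, roughly:
 *
 *	u64 len = ..., orig_start, orig_block_len, ram_bytes;
 *
 *	if (can_nocow_extent(inode, start, &len, &orig_start,
 *			     &orig_block_len, &ram_bytes) == 1) {
 *		... safe to write into the existing extent ...
 *	} else {
 *		... must allocate a new extent and CoW ...
 *	}
 *
 * Note that len may be shrunk to the prefix of the range that is
 * actually nocow-safe.
 */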
7311
7312 bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
7313 {
7314         struct radix_tree_root *root = &inode->i_mapping->page_tree;
7315         int found = false;
7316         void **pagep = NULL;
7317         struct page *page = NULL;
7318         int start_idx;
7319         int end_idx;
7320
7321         start_idx = start >> PAGE_CACHE_SHIFT;
7322
7323         /*
7324          * end is the last byte in the last page.  end == start is legal
7325          */
7326         end_idx = end >> PAGE_CACHE_SHIFT;
7327
7328         rcu_read_lock();
7329
7330         /* Most of the code in this while loop is lifted from
7331          * find_get_page.  It's been modified to begin searching from the
7332          * start index and return just the first page found in that range.
7333          * If the found idx is less than or equal to the end idx then we
7334          * know that a page exists.  If no pages are found or if those
7335          * pages are outside of the range then we're fine (yay!). */
7336         while (page == NULL &&
7337                radix_tree_gang_lookup_slot(root, &pagep, NULL, start_idx, 1)) {
7338                 page = radix_tree_deref_slot(pagep);
7339                 if (unlikely(!page))
7340                         break;
7341
7342                 if (radix_tree_exception(page)) {
7343                         if (radix_tree_deref_retry(page)) {
7344                                 page = NULL;
7345                                 continue;
7346                         }
7347                         /*
7348                          * Otherwise, shmem/tmpfs must be storing a swap entry
7349                          * here as an exceptional entry: so return it without
7350                          * attempting to raise page count.
7351                          */
7352                         page = NULL;
7353                         break; /* TODO: Is this relevant for this use case? */
7354                 }
7355
7356                 if (!page_cache_get_speculative(page)) {
7357                         page = NULL;
7358                         continue;
7359                 }
7360
7361                 /*
7362                  * Has the page moved?
7363                  * This is part of the lockless pagecache protocol. See
7364                  * include/linux/pagemap.h for details.
7365                  */
7366                 if (unlikely(page != *pagep)) {
7367                         page_cache_release(page);
7368                         page = NULL;
7369                 }
7370         }
7371
7372         if (page) {
7373                 if (page->index <= end_idx)
7374                         found = true;
7375                 page_cache_release(page);
7376         }
7377
7378         rcu_read_unlock();
7379         return found;
7380 }
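
/*
 * Worked example (annotation, not part of the original source): with 4K
 * pages, start = 5000 and end = 12287 give start_idx = 5000 >> 12 = 1
 * and end_idx = 12287 >> 12 = 2, so any cached page with index 1 or 2
 * makes this return true.
 */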
7381
7382 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
7383                               struct extent_state **cached_state, int writing)
7384 {
7385         struct btrfs_ordered_extent *ordered;
7386         int ret = 0;
7387
7388         while (1) {
7389                 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7390                                  cached_state);
7391                 /*
7392                  * We're concerned with the entire range that we're going to be
7393                  * doing DIO to, so we need to make sure there are no ordered
7394                  * extents in this range.
7395                  */
7396                 ordered = btrfs_lookup_ordered_range(inode, lockstart,
7397                                                      lockend - lockstart + 1);
7398
7399                 /*
7400                  * We need to make sure there are no buffered pages in this
7401                  * range either, we could have raced between the invalidate in
7402                  * generic_file_direct_write and locking the extent.  The
7403                  * invalidate needs to happen so that reads after a write do not
7404                  * get stale data.
7405                  */
7406                 if (!ordered &&
7407                     (!writing ||
7408                      !btrfs_page_exists_in_range(inode, lockstart, lockend)))
7409                         break;
7410
7411                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7412                                      cached_state, GFP_NOFS);
7413
7414                 if (ordered) {
7415                         btrfs_start_ordered_extent(inode, ordered, 1);
7416                         btrfs_put_ordered_extent(ordered);
7417                 } else {
7418                         /*
7419                          * We could trigger writeback for this range (and wait
7420                          * for it to complete) and then invalidate the pages for
7421                          * this range (through invalidate_inode_pages2_range()),
7422                          * but that can lead us to a deadlock with a concurrent
7423                          * call to readpages() (a buffered read or a defrag call
7424                          * triggered a readahead) on a page lock due to an
7425                          * ordered dio extent we created before but did not yet
7426                          * have a corresponding bio submitted (and hence it cannot
7427                          * complete), which makes readpages() wait for that
7428                          * ordered extent to complete while holding a lock on
7429                          * that page.
7430                          */
7431                         ret = -ENOTBLK;
7432                         break;
7433                 }
7434
7435                 cond_resched();
7436         }
7437
7438         return ret;
7439 }
7440
7441 static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
7442                                            u64 len, u64 orig_start,
7443                                            u64 block_start, u64 block_len,
7444                                            u64 orig_block_len, u64 ram_bytes,
7445                                            int type)
7446 {
7447         struct extent_map_tree *em_tree;
7448         struct extent_map *em;
7449         struct btrfs_root *root = BTRFS_I(inode)->root;
7450         int ret;
7451
7452         em_tree = &BTRFS_I(inode)->extent_tree;
7453         em = alloc_extent_map();
7454         if (!em)
7455                 return ERR_PTR(-ENOMEM);
7456
7457         em->start = start;
7458         em->orig_start = orig_start;
7459         em->mod_start = start;
7460         em->mod_len = len;
7461         em->len = len;
7462         em->block_len = block_len;
7463         em->block_start = block_start;
7464         em->bdev = root->fs_info->fs_devices->latest_bdev;
7465         em->orig_block_len = orig_block_len;
7466         em->ram_bytes = ram_bytes;
7467         em->generation = -1;
7468         set_bit(EXTENT_FLAG_PINNED, &em->flags);
7469         if (type == BTRFS_ORDERED_PREALLOC)
7470                 set_bit(EXTENT_FLAG_FILLING, &em->flags);
7471
7472         do {
7473                 btrfs_drop_extent_cache(inode, em->start,
7474                                 em->start + em->len - 1, 0);
7475                 write_lock(&em_tree->lock);
7476                 ret = add_extent_mapping(em_tree, em, 1);
7477                 write_unlock(&em_tree->lock);
7478         } while (ret == -EEXIST);
7479
7480         if (ret) {
7481                 free_extent_map(em);
7482                 return ERR_PTR(ret);
7483         }
7484
7485         return em;
7486 }
7487
7488 static void adjust_dio_outstanding_extents(struct inode *inode,
7489                                            struct btrfs_dio_data *dio_data,
7490                                            const u64 len)
7491 {
7492         unsigned num_extents;
7493
7494         num_extents = (unsigned) div64_u64(len + BTRFS_MAX_EXTENT_SIZE - 1,
7495                                            BTRFS_MAX_EXTENT_SIZE);
7496         /*
7497          * If we have an outstanding_extents count still set then we're
7498          * within our reservation, otherwise we need to adjust our inode
7499          * counter appropriately.
7500          */
7501         if (dio_data->outstanding_extents) {
7502                 dio_data->outstanding_extents -= num_extents;
7503         } else {
7504                 spin_lock(&BTRFS_I(inode)->lock);
7505                 BTRFS_I(inode)->outstanding_extents += num_extents;
7506                 spin_unlock(&BTRFS_I(inode)->lock);
7507         }
7508 }
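
/*
 * Worked example (annotation, not part of the original source; assumes
 * BTRFS_MAX_EXTENT_SIZE is 128M as in kernels of this era): a len of
 * 300M gives num_extents = (300M + 128M - 1) / 128M = 3, i.e. a
 * round-up division, because each started 128M chunk needs its own
 * outstanding-extent reservation.
 */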
7509
7510 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
7511                                    struct buffer_head *bh_result, int create)
7512 {
7513         struct extent_map *em;
7514         struct btrfs_root *root = BTRFS_I(inode)->root;
7515         struct extent_state *cached_state = NULL;
7516         struct btrfs_dio_data *dio_data = NULL;
7517         u64 start = iblock << inode->i_blkbits;
7518         u64 lockstart, lockend;
7519         u64 len = bh_result->b_size;
7520         int unlock_bits = EXTENT_LOCKED;
7521         int ret = 0;
7522
7523         if (create)
7524                 unlock_bits |= EXTENT_DIRTY;
7525         else
7526                 len = min_t(u64, len, root->sectorsize);
7527
7528         lockstart = start;
7529         lockend = start + len - 1;
7530
7531         if (current->journal_info) {
7532                 /*
7533                  * Need to pull our outstanding extents and set journal_info to NULL so
7534                  * that anything that needs to check if there's a transaction doesn't get
7535                  * confused.
7536                  */
7537                 dio_data = current->journal_info;
7538                 current->journal_info = NULL;
7539         }
7540
7541         /*
7542          * If this errors out it's because we couldn't invalidate pagecache for
7543          * this range and we need to fallback to buffered.
7544          * this range and we need to fall back to buffered IO.
7545         if (lock_extent_direct(inode, lockstart, lockend, &cached_state,
7546                                create)) {
7547                 ret = -ENOTBLK;
7548                 goto err;
7549         }
7550
7551         em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
7552         if (IS_ERR(em)) {
7553                 ret = PTR_ERR(em);
7554                 goto unlock_err;
7555         }
7556
7557         /*
7558          * Ok, for INLINE and COMPRESSED extents we need to fall back to
7559          * buffered io.  INLINE is special, and we could probably kludge it in
7560          * here, but it's still buffered so for safety let's just fall back to
7561          * the generic buffered path.
7562          *
7563          * For COMPRESSED we _have_ to read the entire extent in so we can
7564          * decompress it, so there will be buffering required no matter what we
7565          * do, so go ahead and fall back to buffered.
7566          *
7567          * We return -ENOTBLK because that's what makes DIO go ahead and fall
7568          * back to buffered IO.  Don't blame me, this is the price we pay for
7569          * using the generic code.
7570          */
7571         if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
7572             em->block_start == EXTENT_MAP_INLINE) {
7573                 free_extent_map(em);
7574                 ret = -ENOTBLK;
7575                 goto unlock_err;
7576         }
7577
7578         /* Just a good old fashioned hole, return */
7579         if (!create && (em->block_start == EXTENT_MAP_HOLE ||
7580                         test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
7581                 free_extent_map(em);
7582                 goto unlock_err;
7583         }
7584
7585         /*
7586          * We don't allocate a new extent in the following cases:
7587          *
7588          * 1) The inode is marked as NODATACOW.  In this case we'll just use the
7589          * existing extent.
7590          * 2) The extent is marked as PREALLOC.  We're good to go here and can
7591          * just use the extent.
7592          *
7593          */
7594         if (!create) {
7595                 len = min(len, em->len - (start - em->start));
7596                 lockstart = start + len;
7597                 goto unlock;
7598         }
7599
7600         if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
7601             ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
7602              em->block_start != EXTENT_MAP_HOLE)) {
7603                 int type;
7604                 u64 block_start, orig_start, orig_block_len, ram_bytes;
7605
7606                 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7607                         type = BTRFS_ORDERED_PREALLOC;
7608                 else
7609                         type = BTRFS_ORDERED_NOCOW;
7610                 len = min(len, em->len - (start - em->start));
7611                 block_start = em->block_start + (start - em->start);
7612
7613                 if (can_nocow_extent(inode, start, &len, &orig_start,
7614                                      &orig_block_len, &ram_bytes) == 1) {
7615                         if (type == BTRFS_ORDERED_PREALLOC) {
7616                                 free_extent_map(em);
7617                                 em = create_pinned_em(inode, start, len,
7618                                                        orig_start,
7619                                                        block_start, len,
7620                                                        orig_block_len,
7621                                                        ram_bytes, type);
7622                                 if (IS_ERR(em)) {
7623                                         ret = PTR_ERR(em);
7624                                         goto unlock_err;
7625                                 }
7626                         }
7627
7628                         ret = btrfs_add_ordered_extent_dio(inode, start,
7629                                            block_start, len, len, type);
7630                         if (ret) {
7631                                 free_extent_map(em);
7632                                 goto unlock_err;
7633                         }
7634                         goto unlock;
7635                 }
7636         }
7637
7638         /*
7639          * this will cow the extent, reset the len in case we changed
7640          * it above
7641          */
7642         len = bh_result->b_size;
7643         free_extent_map(em);
7644         em = btrfs_new_extent_direct(inode, start, len);
7645         if (IS_ERR(em)) {
7646                 ret = PTR_ERR(em);
7647                 goto unlock_err;
7648         }
7649         len = min(len, em->len - (start - em->start));
7650 unlock:
7651         bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
7652                 inode->i_blkbits;
7653         bh_result->b_size = len;
7654         bh_result->b_bdev = em->bdev;
7655         set_buffer_mapped(bh_result);
7656         if (create) {
7657                 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7658                         set_buffer_new(bh_result);
7659
7660                 /*
7661                  * Need to update the i_size under the extent lock so buffered
7662                  * readers will get the updated i_size when we unlock.
7663                  */
7664                 if (start + len > i_size_read(inode))
7665                         i_size_write(inode, start + len);
7666
7667                 adjust_dio_outstanding_extents(inode, dio_data, len);
7668                 btrfs_free_reserved_data_space(inode, start, len);
7669                 WARN_ON(dio_data->reserve < len);
7670                 dio_data->reserve -= len;
7671                 dio_data->unsubmitted_oe_range_end = start + len;
7672                 current->journal_info = dio_data;
7673         }
7674
7675         /*
7676          * In the case of write we need to clear and unlock the entire range,
7677          * in the case of read we need to unlock only the end area that we
7678          * aren't using if there is any left over space.
7679          */
7680         if (lockstart < lockend) {
7681                 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
7682                                  lockend, unlock_bits, 1, 0,
7683                                  &cached_state, GFP_NOFS);
7684         } else {
7685                 free_extent_state(cached_state);
7686         }
7687
7688         free_extent_map(em);
7689
7690         return 0;
7691
7692 unlock_err:
7693         clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7694                          unlock_bits, 1, 0, &cached_state, GFP_NOFS);
7695 err:
7696         if (dio_data)
7697                 current->journal_info = dio_data;
7698         /*
7699          * Compensate the delalloc release we do in btrfs_direct_IO() when we
7700          * write less data than expected, so that we don't underflow our inode's
7701          * outstanding extents counter.
7702          */
7703         if (create && dio_data)
7704                 adjust_dio_outstanding_extents(inode, dio_data, len);
7705
7706         return ret;
7707 }
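
/*
 * Annotation (not part of the original source): this is the get_block_t
 * style callback btrfs hands to the generic direct IO code.  The
 * simplified contract is:
 *
 *	bh_result->b_blocknr = disk block backing iblock
 *	bh_result->b_size    = how many contiguous bytes were mapped
 *
 * and the generic code calls back in for the remainder whenever b_size
 * comes back smaller than it asked for.
 */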
7708
7709 static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio,
7710                                         int rw, int mirror_num)
7711 {
7712         struct btrfs_root *root = BTRFS_I(inode)->root;
7713         int ret;
7714
7715         BUG_ON(rw & REQ_WRITE);
7716
7717         bio_get(bio);
7718
7719         ret = btrfs_bio_wq_end_io(root->fs_info, bio,
7720                                   BTRFS_WQ_ENDIO_DIO_REPAIR);
7721         if (ret)
7722                 goto err;
7723
7724         ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
7725 err:
7726         bio_put(bio);
7727         return ret;
7728 }
7729
7730 static int btrfs_check_dio_repairable(struct inode *inode,
7731                                       struct bio *failed_bio,
7732                                       struct io_failure_record *failrec,
7733                                       int failed_mirror)
7734 {
7735         int num_copies;
7736
7737         num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
7738                                       failrec->logical, failrec->len);
7739         if (num_copies == 1) {
7740                 /*
7741                  * we only have a single copy of the data, so don't bother with
7742                  * all the retry and error correction code that follows. no
7743                  * matter what the error is, it is very likely to persist.
7744                  */
7745                 pr_debug("Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
7746                          num_copies, failrec->this_mirror, failed_mirror);
7747                 return 0;
7748         }
7749
7750         failrec->failed_mirror = failed_mirror;
7751         failrec->this_mirror++;
7752         if (failrec->this_mirror == failed_mirror)
7753                 failrec->this_mirror++;
7754
7755         if (failrec->this_mirror > num_copies) {
7756                 pr_debug("Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
7757                          num_copies, failrec->this_mirror, failed_mirror);
7758                 return 0;
7759         }
7760
7761         return 1;
7762 }
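
/*
 * Worked example (annotation, not part of the original source; assumes
 * a fresh failrec with this_mirror == 0): on a two-copy profile
 * (num_copies = 2) with failed_mirror = 1, this_mirror is advanced to 1
 * and then, being equal to the failed mirror, to 2.  Since 2 is not
 * greater than num_copies the retry is allowed from mirror 2 and we
 * return 1.
 */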
7763
7764 static int dio_read_error(struct inode *inode, struct bio *failed_bio,
7765                           struct page *page, u64 start, u64 end,
7766                           int failed_mirror, bio_end_io_t *repair_endio,
7767                           void *repair_arg)
7768 {
7769         struct io_failure_record *failrec;
7770         struct bio *bio;
7771         int isector;
7772         int read_mode;
7773         int ret;
7774
7775         BUG_ON(failed_bio->bi_rw & REQ_WRITE);
7776
7777         ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
7778         if (ret)
7779                 return ret;
7780
7781         ret = btrfs_check_dio_repairable(inode, failed_bio, failrec,
7782                                          failed_mirror);
7783         if (!ret) {
7784                 free_io_failure(inode, failrec);
7785                 return -EIO;
7786         }
7787
7788         if (failed_bio->bi_vcnt > 1)
7789                 read_mode = READ_SYNC | REQ_FAILFAST_DEV;
7790         else
7791                 read_mode = READ_SYNC;
7792
7793         isector = start - btrfs_io_bio(failed_bio)->logical;
7794         isector >>= inode->i_sb->s_blocksize_bits;
7795         bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
7796                                       0, isector, repair_endio, repair_arg);
7797         if (!bio) {
7798                 free_io_failure(inode, failrec);
7799                 return -EIO;
7800         }
7801
7802         btrfs_debug(BTRFS_I(inode)->root->fs_info,
7803                     "Repair DIO Read Error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d\n",
7804                     read_mode, failrec->this_mirror, failrec->in_validation);
7805
7806         ret = submit_dio_repair_bio(inode, bio, read_mode,
7807                                     failrec->this_mirror);
7808         if (ret) {
7809                 free_io_failure(inode, failrec);
7810                 bio_put(bio);
7811         }
7812
7813         return ret;
7814 }
7815
7816 struct btrfs_retry_complete {
7817         struct completion done;
7818         struct inode *inode;
7819         u64 start;
7820         int uptodate;
7821 };
7822
7823 static void btrfs_retry_endio_nocsum(struct bio *bio)
7824 {
7825         struct btrfs_retry_complete *done = bio->bi_private;
7826         struct bio_vec *bvec;
7827         int i;
7828
7829         if (bio->bi_error)
7830                 goto end;
7831
7832         done->uptodate = 1;
7833         bio_for_each_segment_all(bvec, bio, i)
7834                 clean_io_failure(done->inode, done->start, bvec->bv_page, 0);
7835 end:
7836         complete(&done->done);
7837         bio_put(bio);
7838 }
7839
7840 static int __btrfs_correct_data_nocsum(struct inode *inode,
7841                                        struct btrfs_io_bio *io_bio)
7842 {
7843         struct bio_vec *bvec;
7844         struct btrfs_retry_complete done;
7845         u64 start;
7846         int i;
7847         int ret;
7848
7849         start = io_bio->logical;
7850         done.inode = inode;
7851
7852         bio_for_each_segment_all(bvec, &io_bio->bio, i) {
7853 try_again:
7854                 done.uptodate = 0;
7855                 done.start = start;
7856                 init_completion(&done.done);
7857
7858                 ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start,
7859                                      start + bvec->bv_len - 1,
7860                                      io_bio->mirror_num,
7861                                      btrfs_retry_endio_nocsum, &done);
7862                 if (ret)
7863                         return ret;
7864
7865                 wait_for_completion(&done.done);
7866
7867                 if (!done.uptodate) {
7868                         /* We might have another mirror, so try again */
7869                         goto try_again;
7870                 }
7871
7872                 start += bvec->bv_len;
7873         }
7874
7875         return 0;
7876 }
7877
7878 static void btrfs_retry_endio(struct bio *bio)
7879 {
7880         struct btrfs_retry_complete *done = bio->bi_private;
7881         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
7882         struct bio_vec *bvec;
7883         int uptodate;
7884         int ret;
7885         int i;
7886
7887         if (bio->bi_error)
7888                 goto end;
7889
7890         uptodate = 1;
7891         bio_for_each_segment_all(bvec, bio, i) {
7892                 ret = __readpage_endio_check(done->inode, io_bio, i,
7893                                              bvec->bv_page, 0,
7894                                              done->start, bvec->bv_len);
7895                 if (!ret)
7896                         clean_io_failure(done->inode, done->start,
7897                                          bvec->bv_page, 0);
7898                 else
7899                         uptodate = 0;
7900         }
7901
7902         done->uptodate = uptodate;
7903 end:
7904         complete(&done->done);
7905         bio_put(bio);
7906 }
7907
7908 static int __btrfs_subio_endio_read(struct inode *inode,
7909                                     struct btrfs_io_bio *io_bio, int err)
7910 {
7911         struct bio_vec *bvec;
7912         struct btrfs_retry_complete done;
7913         u64 start;
7914         u64 offset = 0;
7915         int i;
7916         int ret;
7917
7918         err = 0;
7919         start = io_bio->logical;
7920         done.inode = inode;
7921
7922         bio_for_each_segment_all(bvec, &io_bio->bio, i) {
7923                 ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page,
7924                                              0, start, bvec->bv_len);
7925                 if (likely(!ret))
7926                         goto next;
7927 try_again:
7928                 done.uptodate = 0;
7929                 done.start = start;
7930                 init_completion(&done.done);
7931
7932                 ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start,
7933                                      start + bvec->bv_len - 1,
7934                                      io_bio->mirror_num,
7935                                      btrfs_retry_endio, &done);
7936                 if (ret) {
7937                         err = ret;
7938                         goto next;
7939                 }
7940
7941                 wait_for_completion(&done.done);
7942
7943                 if (!done.uptodate) {
7944                         /* We might have another mirror, so try again */
7945                         goto try_again;
7946                 }
7947 next:
7948                 offset += bvec->bv_len;
7949                 start += bvec->bv_len;
7950         }
7951
7952         return err;
7953 }
7954
7955 static int btrfs_subio_endio_read(struct inode *inode,
7956                                   struct btrfs_io_bio *io_bio, int err)
7957 {
7958         bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
7959
7960         if (skip_csum) {
7961                 if (unlikely(err))
7962                         return __btrfs_correct_data_nocsum(inode, io_bio);
7963                 else
7964                         return 0;
7965         } else {
7966                 return __btrfs_subio_endio_read(inode, io_bio, err);
7967         }
7968 }
7969
7970 static void btrfs_endio_direct_read(struct bio *bio)
7971 {
7972         struct btrfs_dio_private *dip = bio->bi_private;
7973         struct inode *inode = dip->inode;
7974         struct bio *dio_bio;
7975         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
7976         int err = bio->bi_error;
7977
7978         if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED)
7979                 err = btrfs_subio_endio_read(inode, io_bio, err);
7980
7981         unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
7982                       dip->logical_offset + dip->bytes - 1);
7983         dio_bio = dip->dio_bio;
7984
7985         kfree(dip);
7986
7987         dio_end_io(dio_bio, bio->bi_error);
7988
7989         if (io_bio->end_io)
7990                 io_bio->end_io(io_bio, err);
7991         bio_put(bio);
7992 }
7993
7994 static void btrfs_endio_direct_write_update_ordered(struct inode *inode,
7995                                                     const u64 offset,
7996                                                     const u64 bytes,
7997                                                     const int uptodate)
7998 {
7999         struct btrfs_root *root = BTRFS_I(inode)->root;
8000         struct btrfs_ordered_extent *ordered = NULL;
8001         u64 ordered_offset = offset;
8002         u64 ordered_bytes = bytes;
8003         int ret;
8004
8005 again:
8006         ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
8007                                                    &ordered_offset,
8008                                                    ordered_bytes,
8009                                                    uptodate);
8010         if (!ret)
8011                 goto out_test;
8012
8013         btrfs_init_work(&ordered->work, btrfs_endio_write_helper,
8014                         finish_ordered_fn, NULL, NULL);
8015         btrfs_queue_work(root->fs_info->endio_write_workers,
8016                          &ordered->work);
8017 out_test:
8018         /*
8019          * our bio might span multiple ordered extents.  If we haven't
8020          * completed the accounting for the whole dio, go back and try again
8021          */
8022         if (ordered_offset < offset + bytes) {
8023                 ordered_bytes = offset + bytes - ordered_offset;
8024                 ordered = NULL;
8025                 goto again;
8026         }
8027 }
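
/*
 * Worked example (annotation, not part of the original source): suppose
 * the dio covered [0, 8K) but that range is backed by two ordered
 * extents, [0, 4K) and [4K, 8K).  The first pass finishes the first
 * ordered extent, queues its completion work and advances
 * ordered_offset to 4K; since 4K < offset + bytes we jump back to
 * "again" to account for the second one as well.
 */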
8028
8029 static void btrfs_endio_direct_write(struct bio *bio)
8030 {
8031         struct btrfs_dio_private *dip = bio->bi_private;
8032         struct bio *dio_bio = dip->dio_bio;
8033
8034         btrfs_endio_direct_write_update_ordered(dip->inode,
8035                                                 dip->logical_offset,
8036                                                 dip->bytes,
8037                                                 !bio->bi_error);
8038
8039         kfree(dip);
8040
8041         dio_end_io(dio_bio, bio->bi_error);
8042         bio_put(bio);
8043 }
8044
8045 static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
8046                                     struct bio *bio, int mirror_num,
8047                                     unsigned long bio_flags, u64 offset)
8048 {
8049         int ret;
8050         struct btrfs_root *root = BTRFS_I(inode)->root;
8051         ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
8052         BUG_ON(ret); /* -ENOMEM */
8053         return 0;
8054 }
8055
8056 static void btrfs_end_dio_bio(struct bio *bio)
8057 {
8058         struct btrfs_dio_private *dip = bio->bi_private;
8059         int err = bio->bi_error;
8060
8061         if (err)
8062                 btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
8063                            "direct IO failed ino %llu rw %lu sector %#Lx len %u err no %d",
8064                            btrfs_ino(dip->inode), bio->bi_rw,
8065                            (unsigned long long)bio->bi_iter.bi_sector,
8066                            bio->bi_iter.bi_size, err);
8067
8068         if (dip->subio_endio)
8069                 err = dip->subio_endio(dip->inode, btrfs_io_bio(bio), err);
8070
8071         if (err) {
8072                 dip->errors = 1;
8073
8074                 /*
8075                  * before the atomic variable goes to zero, we must make sure
8076                  * dip->errors is perceived to be set.
8077                  */
8078                 smp_mb__before_atomic();
8079         }
8080
8081         /* if there are more bios still pending for this dio, just exit */
8082         if (!atomic_dec_and_test(&dip->pending_bios))
8083                 goto out;
8084
8085         if (dip->errors) {
8086                 bio_io_error(dip->orig_bio);
8087         } else {
8088                 dip->dio_bio->bi_error = 0;
8089                 bio_endio(dip->orig_bio);
8090         }
8091 out:
8092         bio_put(bio);
8093 }
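/*
 * The error propagation above is a generic flag-then-refcount pattern:
 * publish the error before dropping our reference, with a barrier that
 * pairs with the one implied by atomic_dec_and_test(), so whichever
 * completion drops the last reference is guaranteed to observe the
 * flag.  A minimal sketch of the shape (names hypothetical):
 *
 *	if (err) {
 *		ctx->failed = 1;
 *		smp_mb__before_atomic();
 *	}
 *	if (atomic_dec_and_test(&ctx->pending))
 *		finish(ctx);	/* sees ctx->failed if any sub-bio failed */
 */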
8094
8095 static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
8096                                        u64 first_sector, gfp_t gfp_flags)
8097 {
8098         struct bio *bio;
8099         bio = btrfs_bio_alloc(bdev, first_sector, BIO_MAX_PAGES, gfp_flags);
8100         if (bio)
8101                 bio_associate_current(bio);
8102         return bio;
8103 }
8104
8105 static inline int btrfs_lookup_and_bind_dio_csum(struct btrfs_root *root,
8106                                                  struct inode *inode,
8107                                                  struct btrfs_dio_private *dip,
8108                                                  struct bio *bio,
8109                                                  u64 file_offset)
8110 {
8111         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
8112         struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio);
8113         int ret;
8114
8115         /*
8116          * We load all the csum data we need when we submit
8117          * the first bio to reduce the csum tree search and
8118          * contention.
8119          */
8120         if (dip->logical_offset == file_offset) {
8121                 ret = btrfs_lookup_bio_sums_dio(root, inode, dip->orig_bio,
8122                                                 file_offset);
8123                 if (ret)
8124                         return ret;
8125         }
8126
8127         if (bio == dip->orig_bio)
8128                 return 0;
8129
8130         file_offset -= dip->logical_offset;
8131         file_offset >>= inode->i_sb->s_blocksize_bits;
8132         io_bio->csum = (u8 *)(((u32 *)orig_io_bio->csum) + file_offset);
8133
8134         return 0;
8135 }
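/*
 * The arithmetic above indexes into the csum array loaded once for the
 * whole orig_bio: one u32 csum per filesystem block.  A worked example,
 * assuming 4 KiB blocks (s_blocksize_bits == 12):
 *
 *	dip->logical_offset == 0, sub-bio file_offset == 8192
 *	index = (8192 - 0) >> 12 = 2
 *	io_bio->csum = &((u32 *)orig_io_bio->csum)[2]
 *
 * so the sub-bio starts verifying at the third csum entry.
 */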
8136
8137 static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
8138                                          int rw, u64 file_offset, int skip_sum,
8139                                          int async_submit)
8140 {
8141         struct btrfs_dio_private *dip = bio->bi_private;
8142         int write = rw & REQ_WRITE;
8143         struct btrfs_root *root = BTRFS_I(inode)->root;
8144         int ret;
8145
8146         if (async_submit)
8147                 async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
8148
8149         bio_get(bio);
8150
8151         if (!write) {
8152                 ret = btrfs_bio_wq_end_io(root->fs_info, bio,
8153                                 BTRFS_WQ_ENDIO_DATA);
8154                 if (ret)
8155                         goto err;
8156         }
8157
8158         if (skip_sum)
8159                 goto map;
8160
8161         if (write && async_submit) {
8162                 ret = btrfs_wq_submit_bio(root->fs_info,
8163                                    inode, rw, bio, 0, 0,
8164                                    file_offset,
8165                                    __btrfs_submit_bio_start_direct_io,
8166                                    __btrfs_submit_bio_done);
8167                 goto err;
8168         } else if (write) {
8169                 /*
8170                  * If we aren't doing async submit, calculate the csum of the
8171                  * bio now.
8172                  */
8173                 ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
8174                 if (ret)
8175                         goto err;
8176         } else {
8177                 ret = btrfs_lookup_and_bind_dio_csum(root, inode, dip, bio,
8178                                                      file_offset);
8179                 if (ret)
8180                         goto err;
8181         }
8182 map:
8183         ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
8184 err:
8185         bio_put(bio);
8186         return ret;
8187 }
8188
8189 static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
8190                                     int skip_sum)
8191 {
8192         struct inode *inode = dip->inode;
8193         struct btrfs_root *root = BTRFS_I(inode)->root;
8194         struct bio *bio;
8195         struct bio *orig_bio = dip->orig_bio;
8196         struct bio_vec *bvec = orig_bio->bi_io_vec;
8197         u64 start_sector = orig_bio->bi_iter.bi_sector;
8198         u64 file_offset = dip->logical_offset;
8199         u64 submit_len = 0;
8200         u64 map_length;
8201         int nr_pages = 0;
8202         int ret;
8203         int async_submit = 0;
8204
8205         map_length = orig_bio->bi_iter.bi_size;
8206         ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
8207                               &map_length, NULL, 0);
8208         if (ret)
8209                 return -EIO;
8210
8211         if (map_length >= orig_bio->bi_iter.bi_size) {
8212                 bio = orig_bio;
8213                 dip->flags |= BTRFS_DIO_ORIG_BIO_SUBMITTED;
8214                 goto submit;
8215         }
8216
8217         /* async crcs make it difficult to collect full stripe writes. */
8218         if (btrfs_get_alloc_profile(root, 1) & BTRFS_BLOCK_GROUP_RAID56_MASK)
8219                 async_submit = 0;
8220         else
8221                 async_submit = 1;
8222
8223         bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
8224         if (!bio)
8225                 return -ENOMEM;
8226
8227         bio->bi_private = dip;
8228         bio->bi_end_io = btrfs_end_dio_bio;
8229         btrfs_io_bio(bio)->logical = file_offset;
8230         atomic_inc(&dip->pending_bios);
8231
8232         while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
8233                 if (map_length < submit_len + bvec->bv_len ||
8234                     bio_add_page(bio, bvec->bv_page, bvec->bv_len,
8235                                  bvec->bv_offset) < bvec->bv_len) {
8236                         /*
8237                          * Increment the count before we submit the bio so
8238                          * the end IO handler can't run until we're done
8239                          * setting up the dip; otherwise the dip might get
8240                          * freed out from under us.
8241                          */
8242                         atomic_inc(&dip->pending_bios);
8243                         ret = __btrfs_submit_dio_bio(bio, inode, rw,
8244                                                      file_offset, skip_sum,
8245                                                      async_submit);
8246                         if (ret) {
8247                                 bio_put(bio);
8248                                 atomic_dec(&dip->pending_bios);
8249                                 goto out_err;
8250                         }
8251
8252                         start_sector += submit_len >> 9;
8253                         file_offset += submit_len;
8254
8255                         submit_len = 0;
8256                         nr_pages = 0;
8257
8258                         bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
8259                                                   start_sector, GFP_NOFS);
8260                         if (!bio)
8261                                 goto out_err;
8262                         bio->bi_private = dip;
8263                         bio->bi_end_io = btrfs_end_dio_bio;
8264                         btrfs_io_bio(bio)->logical = file_offset;
8265
8266                         map_length = orig_bio->bi_iter.bi_size;
8267                         ret = btrfs_map_block(root->fs_info, rw,
8268                                               start_sector << 9,
8269                                               &map_length, NULL, 0);
8270                         if (ret) {
8271                                 bio_put(bio);
8272                                 goto out_err;
8273                         }
8274                 } else {
8275                         submit_len += bvec->bv_len;
8276                         nr_pages++;
8277                         bvec++;
8278                 }
8279         }
8280
8281 submit:
8282         ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
8283                                      async_submit);
8284         if (!ret)
8285                 return 0;
8286
8287         bio_put(bio);
8288 out_err:
8289         dip->errors = 1;
8290         /*
8291          * Before the atomic variable drops to zero, we must
8292          * make sure dip->errors is perceived to be set.
8293          */
8294         smp_mb__before_atomic();
8295         if (atomic_dec_and_test(&dip->pending_bios))
8296                 bio_io_error(dip->orig_bio);
8297
8298         /* bio_end_io() will handle the error, so we needn't return it */
8299         return 0;
8300 }
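/*
 * In the split case above, btrfs_map_block() reports how many bytes
 * from start_sector can be mapped as a single chunk, and the loop packs
 * bvecs into a child bio until either that limit or bio_add_page()
 * would be exceeded.  An illustrative trace (sizes assumed): for a
 * 256 KiB orig_bio where the first mapping returns a map_length of
 * 128 KiB, the first child bio carries [0, 128K); then start_sector and
 * file_offset advance by 128 KiB, the block is remapped, and a second
 * child bio carries the remainder.
 */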
8301
8302 static void btrfs_submit_direct(int rw, struct bio *dio_bio,
8303                                 struct inode *inode, loff_t file_offset)
8304 {
8305         struct btrfs_dio_private *dip = NULL;
8306         struct bio *io_bio = NULL;
8307         struct btrfs_io_bio *btrfs_bio;
8308         int skip_sum;
8309         int write = rw & REQ_WRITE;
8310         int ret = 0;
8311
8312         skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
8313
8314         io_bio = btrfs_bio_clone(dio_bio, GFP_NOFS);
8315         if (!io_bio) {
8316                 ret = -ENOMEM;
8317                 goto free_ordered;
8318         }
8319
8320         dip = kzalloc(sizeof(*dip), GFP_NOFS);
8321         if (!dip) {
8322                 ret = -ENOMEM;
8323                 goto free_ordered;
8324         }
8325
8326         dip->private = dio_bio->bi_private;
8327         dip->inode = inode;
8328         dip->logical_offset = file_offset;
8329         dip->bytes = dio_bio->bi_iter.bi_size;
8330         dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9;
8331         io_bio->bi_private = dip;
8332         dip->orig_bio = io_bio;
8333         dip->dio_bio = dio_bio;
8334         atomic_set(&dip->pending_bios, 0);
8335         btrfs_bio = btrfs_io_bio(io_bio);
8336         btrfs_bio->logical = file_offset;
8337
8338         if (write) {
8339                 io_bio->bi_end_io = btrfs_endio_direct_write;
8340         } else {
8341                 io_bio->bi_end_io = btrfs_endio_direct_read;
8342                 dip->subio_endio = btrfs_subio_endio_read;
8343         }
8344
8345         /*
8346          * Reset the range for unsubmitted ordered extents (to a 0 length range)
8347          * even if we fail to submit a bio, because in that case we do the
8348          * corresponding error handling below and it must not be done a second
8349          * time by btrfs_direct_IO().
8350          */
8351         if (write) {
8352                 struct btrfs_dio_data *dio_data = current->journal_info;
8353
8354                 dio_data->unsubmitted_oe_range_end = dip->logical_offset +
8355                         dip->bytes;
8356                 dio_data->unsubmitted_oe_range_start =
8357                         dio_data->unsubmitted_oe_range_end;
8358         }
8359
8360         ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
8361         if (!ret)
8362                 return;
8363
8364         if (btrfs_bio->end_io)
8365                 btrfs_bio->end_io(btrfs_bio, ret);
8366
8367 free_ordered:
8368         /*
8369          * If we arrived here it means we either failed to submit the dip,
8370          * failed to clone the dio_bio, or failed to allocate the dip. If we
8371          * cloned the dio_bio and allocated the dip, we can just call
8372          * bio_endio against our io_bio so that we get proper resource
8373          * cleanup if we fail to submit the dip; otherwise, we must do the
8374          * same as btrfs_endio_direct_[write|read] because we can't call
8375          * these callbacks - they require an allocated dip and a clone of dio_bio.
8376          */
8377         if (io_bio && dip) {
8378                 io_bio->bi_error = -EIO;
8379                 bio_endio(io_bio);
8380                 /*
8381                  * The end io callbacks free our dip, do the final put on io_bio
8382                  * and all the cleanup and final put for dio_bio (through
8383                  * dio_end_io()).
8384                  */
8385                 dip = NULL;
8386                 io_bio = NULL;
8387         } else {
8388                 if (write)
8389                         btrfs_endio_direct_write_update_ordered(inode,
8390                                                 file_offset,
8391                                                 dio_bio->bi_iter.bi_size,
8392                                                 0);
8393                 else
8394                         unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
8395                               file_offset + dio_bio->bi_iter.bi_size - 1);
8396
8397                 dio_bio->bi_error = -EIO;
8398                 /*
8399                  * Releases and cleans up our dio_bio, no need to bio_put()
8400                  * nor bio_endio()/bio_io_error() against dio_bio.
8401                  */
8402                 dio_end_io(dio_bio, ret);
8403         }
8404         if (io_bio)
8405                 bio_put(io_bio);
8406         kfree(dip);
8407 }
8408
8409 static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb,
8410                         const struct iov_iter *iter, loff_t offset)
8411 {
8412         int seg;
8413         int i;
8414         unsigned blocksize_mask = root->sectorsize - 1;
8415         ssize_t retval = -EINVAL;
8416
8417         if (offset & blocksize_mask)
8418                 goto out;
8419
8420         if (iov_iter_alignment(iter) & blocksize_mask)
8421                 goto out;
8422
8423         /* If this is a write we don't need to check anymore */
8424         if (iov_iter_rw(iter) == WRITE)
8425                 return 0;
8426         /*
8427          * Check to make sure we don't have duplicate iov_base's in this
8428          * iovec; if so return -EINVAL, otherwise we'll get csum errors
8429          * when reading back.
8430          */
8431         for (seg = 0; seg < iter->nr_segs; seg++) {
8432                 for (i = seg + 1; i < iter->nr_segs; i++) {
8433                         if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
8434                                 goto out;
8435                 }
8436         }
8437         retval = 0;
8438 out:
8439         return retval;
8440 }
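/*
 * The mask tests above are the usual power-of-two alignment idiom: an
 * offset is block aligned iff (offset & (sectorsize - 1)) == 0.  A
 * worked example, assuming a 4096-byte sectorsize:
 *
 *	blocksize_mask = 4096 - 1 = 0xfff
 *	offset = 8192: 8192 & 0xfff == 0     -> accepted
 *	offset = 6144: 6144 & 0xfff == 2048  -> rejected (-EINVAL)
 */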
8441
8442 static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
8443                                loff_t offset)
8444 {
8445         struct file *file = iocb->ki_filp;
8446         struct inode *inode = file->f_mapping->host;
8447         struct btrfs_root *root = BTRFS_I(inode)->root;
8448         struct btrfs_dio_data dio_data = { 0 };
8449         size_t count = 0;
8450         int flags = 0;
8451         bool wakeup = true;
8452         bool relock = false;
8453         ssize_t ret;
8454
8455         if (check_direct_IO(BTRFS_I(inode)->root, iocb, iter, offset))
8456                 return 0;
8457
8458         inode_dio_begin(inode);
8459         smp_mb__after_atomic();
8460
8461         /*
8462          * The generic stuff only does filemap_write_and_wait_range, which
8463          * isn't enough if we've written compressed pages to this area, so
8464          * we need to flush the dirty pages again to make absolutely sure
8465          * that any outstanding dirty pages are on disk.
8466          */
8467         count = iov_iter_count(iter);
8468         if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
8469                      &BTRFS_I(inode)->runtime_flags))
8470                 filemap_fdatawrite_range(inode->i_mapping, offset,
8471                                          offset + count - 1);
8472
8473         if (iov_iter_rw(iter) == WRITE) {
8474                 /*
8475                  * If the write DIO goes beyond the EOF, we need to update
8476                  * i_size, which is protected by i_mutex, so we cannot
8477                  * unlock i_mutex in this case.
8478                  */
8479                 if (offset + count <= inode->i_size) {
8480                         mutex_unlock(&inode->i_mutex);
8481                         relock = true;
8482                 }
8483                 ret = btrfs_delalloc_reserve_space(inode, offset, count);
8484                 if (ret)
8485                         goto out;
8486                 dio_data.outstanding_extents = div64_u64(count +
8487                                                 BTRFS_MAX_EXTENT_SIZE - 1,
8488                                                 BTRFS_MAX_EXTENT_SIZE);
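                /*
                 * The div64_u64() above is ceiling division, giving the
                 * worst-case number of extents this write can become.
                 * Assuming BTRFS_MAX_EXTENT_SIZE is 128 MiB (its usual
                 * value): a 384 MiB write reserves 3 outstanding extents,
                 * a 1-byte write reserves 1.
                 */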
8489
8490                 /*
8491                  * We need to know how many extents we reserved so that we can
8492                  * do the accounting properly if we go over the number we
8493                  * originally calculated.  Abuse current->journal_info for this.
8494                  */
8495                 dio_data.reserve = round_up(count, root->sectorsize);
8496                 dio_data.unsubmitted_oe_range_start = (u64)offset;
8497                 dio_data.unsubmitted_oe_range_end = (u64)offset;
8498                 current->journal_info = &dio_data;
8499         } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
8500                                      &BTRFS_I(inode)->runtime_flags)) {
8501                 inode_dio_end(inode);
8502                 flags = DIO_LOCKING | DIO_SKIP_HOLES;
8503                 wakeup = false;
8504         }
8505
8506         ret = __blockdev_direct_IO(iocb, inode,
8507                                    BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
8508                                    iter, offset, btrfs_get_blocks_direct, NULL,
8509                                    btrfs_submit_direct, flags);
8510         if (iov_iter_rw(iter) == WRITE) {
8511                 current->journal_info = NULL;
8512                 if (ret < 0 && ret != -EIOCBQUEUED) {
8513                         if (dio_data.reserve)
8514                                 btrfs_delalloc_release_space(inode, offset,
8515                                                              dio_data.reserve);
8516                         /*
8517                          * On error we might have left some ordered extents
8518                          * without submitting corresponding bios for them, so
8519                          * clean them up to avoid other tasks getting them
8520                          * and waiting on them to complete forever.
8521                          */
8522                         if (dio_data.unsubmitted_oe_range_start <
8523                             dio_data.unsubmitted_oe_range_end)
8524                                 btrfs_endio_direct_write_update_ordered(inode,
8525                                         dio_data.unsubmitted_oe_range_start,
8526                                         dio_data.unsubmitted_oe_range_end -
8527                                         dio_data.unsubmitted_oe_range_start,
8528                                         0);
8529                 } else if (ret >= 0 && (size_t)ret < count)
8530                         btrfs_delalloc_release_space(inode, offset,
8531                                                      count - (size_t)ret);
8532         }
8533 out:
8534         if (wakeup)
8535                 inode_dio_end(inode);
8536         if (relock)
8537                 mutex_lock(&inode->i_mutex);
8538
8539         return ret;
8540 }
8541
8542 #define BTRFS_FIEMAP_FLAGS      (FIEMAP_FLAG_SYNC)
8543
8544 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
8545                 __u64 start, __u64 len)
8546 {
8547         int     ret;
8548
8549         ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS);
8550         if (ret)
8551                 return ret;
8552
8553         return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
8554 }
8555
8556 int btrfs_readpage(struct file *file, struct page *page)
8557 {
8558         struct extent_io_tree *tree;
8559         tree = &BTRFS_I(page->mapping->host)->io_tree;
8560         return extent_read_full_page(tree, page, btrfs_get_extent, 0);
8561 }
8562
8563 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
8564 {
8565         struct extent_io_tree *tree;
8566
8568         if (current->flags & PF_MEMALLOC) {
8569                 redirty_page_for_writepage(wbc, page);
8570                 unlock_page(page);
8571                 return 0;
8572         }
8573         tree = &BTRFS_I(page->mapping->host)->io_tree;
8574         return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
8575 }
8576
8577 static int btrfs_writepages(struct address_space *mapping,
8578                             struct writeback_control *wbc)
8579 {
8580         struct extent_io_tree *tree;
8581
8582         tree = &BTRFS_I(mapping->host)->io_tree;
8583         return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
8584 }
8585
8586 static int
8587 btrfs_readpages(struct file *file, struct address_space *mapping,
8588                 struct list_head *pages, unsigned nr_pages)
8589 {
8590         struct extent_io_tree *tree;
8591         tree = &BTRFS_I(mapping->host)->io_tree;
8592         return extent_readpages(tree, mapping, pages, nr_pages,
8593                                 btrfs_get_extent);
8594 }
8595 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8596 {
8597         struct extent_io_tree *tree;
8598         struct extent_map_tree *map;
8599         int ret;
8600
8601         tree = &BTRFS_I(page->mapping->host)->io_tree;
8602         map = &BTRFS_I(page->mapping->host)->extent_tree;
8603         ret = try_release_extent_mapping(map, tree, page, gfp_flags);
8604         if (ret == 1) {
8605                 ClearPagePrivate(page);
8606                 set_page_private(page, 0);
8607                 page_cache_release(page);
8608         }
8609         return ret;
8610 }
8611
8612 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8613 {
8614         if (PageWriteback(page) || PageDirty(page))
8615                 return 0;
8616         return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
8617 }
8618
8619 static void btrfs_invalidatepage(struct page *page, unsigned int offset,
8620                                  unsigned int length)
8621 {
8622         struct inode *inode = page->mapping->host;
8623         struct extent_io_tree *tree;
8624         struct btrfs_ordered_extent *ordered;
8625         struct extent_state *cached_state = NULL;
8626         u64 page_start = page_offset(page);
8627         u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
8628         int inode_evicting = inode->i_state & I_FREEING;
8629
8630         /*
8631          * we have the page locked, so new writeback can't start,
8632          * and the dirty bit won't be cleared while we are here.
8633          *
8634          * Wait for IO on this page so that we can safely clear
8635          * the PagePrivate2 bit and do ordered accounting
8636          */
8637         wait_on_page_writeback(page);
8638
8639         tree = &BTRFS_I(inode)->io_tree;
8640         if (offset) {
8641                 btrfs_releasepage(page, GFP_NOFS);
8642                 return;
8643         }
8644
8645         if (!inode_evicting)
8646                 lock_extent_bits(tree, page_start, page_end, &cached_state);
8647         ordered = btrfs_lookup_ordered_extent(inode, page_start);
8648         if (ordered) {
8649                 /*
8650                  * IO on this page will never be started, so we need
8651                  * to account for any ordered extents now
8652                  */
8653                 if (!inode_evicting)
8654                         clear_extent_bit(tree, page_start, page_end,
8655                                          EXTENT_DIRTY | EXTENT_DELALLOC |
8656                                          EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
8657                                          EXTENT_DEFRAG, 1, 0, &cached_state,
8658                                          GFP_NOFS);
8659                 /*
8660                  * whoever cleared the private bit is responsible
8661                  * for the finish_ordered_io
8662                  */
8663                 if (TestClearPagePrivate2(page)) {
8664                         struct btrfs_ordered_inode_tree *tree;
8665                         u64 new_len;
8666
8667                         tree = &BTRFS_I(inode)->ordered_tree;
8668
8669                         spin_lock_irq(&tree->lock);
8670                         set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
8671                         new_len = page_start - ordered->file_offset;
8672                         if (new_len < ordered->truncated_len)
8673                                 ordered->truncated_len = new_len;
8674                         spin_unlock_irq(&tree->lock);
8675
8676                         if (btrfs_dec_test_ordered_pending(inode, &ordered,
8677                                                            page_start,
8678                                                            PAGE_CACHE_SIZE, 1))
8679                                 btrfs_finish_ordered_io(ordered);
8680                 }
8681                 btrfs_put_ordered_extent(ordered);
8682                 if (!inode_evicting) {
8683                         cached_state = NULL;
8684                         lock_extent_bits(tree, page_start, page_end,
8685                                          &cached_state);
8686                 }
8687         }
8688
8689         /*
8690          * Qgroup reserved space handler
8691          * The page here will be in one of two states:
8692          * 1) Already written to disk
8693          *    In this case, its reserved space was released from the data rsv
8694          *    map and will finally be freed by the delayed_ref handler.
8695          *    So even if we call qgroup_free_data(), it won't decrease the
8696          *    reserved space.
8697          * 2) Not written to disk
8698          *    This means the reserved space should be freed here.
8699          */
8700         btrfs_qgroup_free_data(inode, page_start, PAGE_CACHE_SIZE);
8701         if (!inode_evicting) {
8702                 clear_extent_bit(tree, page_start, page_end,
8703                                  EXTENT_LOCKED | EXTENT_DIRTY |
8704                                  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
8705                                  EXTENT_DEFRAG, 1, 1,
8706                                  &cached_state, GFP_NOFS);
8707
8708                 __btrfs_releasepage(page, GFP_NOFS);
8709         }
8710
8711         ClearPageChecked(page);
8712         if (PagePrivate(page)) {
8713                 ClearPagePrivate(page);
8714                 set_page_private(page, 0);
8715                 page_cache_release(page);
8716         }
8717 }
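/*
 * A worked example of the truncation accounting above (assumed sizes):
 * with 4 KiB pages, an ordered extent at file_offset 0 with len 1 MiB,
 * and this page at page_start 16384, we get
 *
 *	new_len = 16384 - 0 = 16384
 *
 * so truncated_len is clamped to 16 KiB and only that prefix survives
 * when btrfs_finish_ordered_io() eventually runs.
 */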
8718
8719 /*
8720  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
8721  * called from a page fault handler when a page is first dirtied. Hence we must
8722  * be careful to check for EOF conditions here. We set the page up correctly
8723  * for a written page which means we get ENOSPC checking when writing into
8724  * holes and correct delalloc and unwritten extent mapping on filesystems that
8725  * support these features.
8726  *
8727  * We are not allowed to take the i_mutex here so we have to play games to
8728  * protect against truncate races as the page could now be beyond EOF.  Because
8729  * vmtruncate() writes the inode size before removing pages, once we have the
8730  * page lock we can determine safely if the page is beyond EOF. If it is not
8731  * beyond EOF, then the page is guaranteed safe against truncation until we
8732  * unlock the page.
8733  */
8734 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
8735 {
8736         struct page *page = vmf->page;
8737         struct inode *inode = file_inode(vma->vm_file);
8738         struct btrfs_root *root = BTRFS_I(inode)->root;
8739         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
8740         struct btrfs_ordered_extent *ordered;
8741         struct extent_state *cached_state = NULL;
8742         char *kaddr;
8743         unsigned long zero_start;
8744         loff_t size;
8745         int ret;
8746         int reserved = 0;
8747         u64 page_start;
8748         u64 page_end;
8749
8750         sb_start_pagefault(inode->i_sb);
8751         page_start = page_offset(page);
8752         page_end = page_start + PAGE_CACHE_SIZE - 1;
8753
8754         ret = btrfs_delalloc_reserve_space(inode, page_start,
8755                                            PAGE_CACHE_SIZE);
8756         if (!ret) {
8757                 ret = file_update_time(vma->vm_file);
8758                 reserved = 1;
8759         }
8760         if (ret) {
8761                 if (ret == -ENOMEM)
8762                         ret = VM_FAULT_OOM;
8763                 else /* -ENOSPC, -EIO, etc */
8764                         ret = VM_FAULT_SIGBUS;
8765                 if (reserved)
8766                         goto out;
8767                 goto out_noreserve;
8768         }
8769
8770         ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
8771 again:
8772         lock_page(page);
8773         size = i_size_read(inode);
8774
8775         if ((page->mapping != inode->i_mapping) ||
8776             (page_start >= size)) {
8777                 /* page got truncated out from underneath us */
8778                 goto out_unlock;
8779         }
8780         wait_on_page_writeback(page);
8781
8782         lock_extent_bits(io_tree, page_start, page_end, &cached_state);
8783         set_page_extent_mapped(page);
8784
8785         /*
8786          * we can't set the delalloc bits if there are pending ordered
8787          * extents.  Drop our locks and wait for them to finish
8788          */
8789         ordered = btrfs_lookup_ordered_extent(inode, page_start);
8790         if (ordered) {
8791                 unlock_extent_cached(io_tree, page_start, page_end,
8792                                      &cached_state, GFP_NOFS);
8793                 unlock_page(page);
8794                 btrfs_start_ordered_extent(inode, ordered, 1);
8795                 btrfs_put_ordered_extent(ordered);
8796                 goto again;
8797         }
8798
8799         /*
8800          * XXX - page_mkwrite gets called every time the page is dirtied, even
8801          * if it was already dirty, so for space accounting reasons we need to
8802          * clear any delalloc bits for the range we are about to save.  There
8803          * is probably a better way to do this, but for now keep consistent with
8804          * prepare_pages in the normal write path.
8805          */
8806         clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
8807                           EXTENT_DIRTY | EXTENT_DELALLOC |
8808                           EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
8809                           0, 0, &cached_state, GFP_NOFS);
8810
8811         ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
8812                                         &cached_state);
8813         if (ret) {
8814                 unlock_extent_cached(io_tree, page_start, page_end,
8815                                      &cached_state, GFP_NOFS);
8816                 ret = VM_FAULT_SIGBUS;
8817                 goto out_unlock;
8818         }
8819         ret = 0;
8820
8821         /* page is wholly or partially inside EOF */
8822         if (page_start + PAGE_CACHE_SIZE > size)
8823                 zero_start = size & ~PAGE_CACHE_MASK;
8824         else
8825                 zero_start = PAGE_CACHE_SIZE;
8826
8827         if (zero_start != PAGE_CACHE_SIZE) {
8828                 kaddr = kmap(page);
8829                 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
8830                 flush_dcache_page(page);
8831                 kunmap(page);
8832         }
8833         ClearPageChecked(page);
8834         set_page_dirty(page);
8835         SetPageUptodate(page);
8836
8837         BTRFS_I(inode)->last_trans = root->fs_info->generation;
8838         BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
8839         BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
8840
8841         unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
8842
8843 out_unlock:
8844         if (!ret) {
8845                 sb_end_pagefault(inode->i_sb);
8846                 return VM_FAULT_LOCKED;
8847         }
8848         unlock_page(page);
8849 out:
8850         btrfs_delalloc_release_space(inode, page_start, PAGE_CACHE_SIZE);
8851 out_noreserve:
8852         sb_end_pagefault(inode->i_sb);
8853         return ret;
8854 }
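/*
 * A worked example of the EOF zeroing in btrfs_page_mkwrite() (assumed
 * sizes): with 4 KiB pages, i_size == 10240 and page_start == 8192,
 *
 *	zero_start = 10240 & ~PAGE_CACHE_MASK = 2048
 *
 * so bytes [2048, 4096) of the page are cleared before the page is
 * dirtied, keeping data beyond EOF zeroed.
 */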
8855
8856 static int btrfs_truncate(struct inode *inode)
8857 {
8858         struct btrfs_root *root = BTRFS_I(inode)->root;
8859         struct btrfs_block_rsv *rsv;
8860         int ret = 0;
8861         int err = 0;
8862         struct btrfs_trans_handle *trans;
8863         u64 mask = root->sectorsize - 1;
8864         u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
8865
8866         ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
8867                                        (u64)-1);
8868         if (ret)
8869                 return ret;
8870
8871         /*
8872          * Yes, ladies and gentlemen, this is indeed ugly.  The fact is we have
8873          * 3 things going on here
8874          *
8875          * 1) We need to reserve space for our orphan item and the space to
8876          * delete our orphan item.  Lord knows we don't want to have a dangling
8877          * orphan item because we didn't reserve space to remove it.
8878          *
8879          * 2) We need to reserve space to update our inode.
8880          *
8881          * 3) We need to have something to cache all the space that is going to
8882          * be freed up by the truncate operation, but also have some slack
8883          * space reserved in case it uses space during the truncate (thank you
8884          * very much snapshotting).
8885          *
8886          * And we need these to all be separate.  The fact is we can use a lot
8887          * of space doing the truncate, and we have no earthly idea how much
8888          * space we will use, so we need the truncate reservation to be separate
8889          * so it doesn't end up using space reserved for updating the inode or
8890          * removing the orphan item.  We also need to be able to stop the
8891          * transaction and start a new one, which means we need to be able to
8892          * update the inode several times, and we have no way of knowing how
8893          * many times that will be, so we can't just reserve 1 item for the
8894          * entirety of the operation, so that has to be done separately as well.
8895          * Then there is the orphan item, which does indeed need to be held on
8896          * to for the whole operation, and we need nobody to touch this reserved
8897          * space except the orphan code.
8898          *
8899          * So that leaves us with
8900          *
8901          * 1) root->orphan_block_rsv - for the orphan deletion.
8902          * 2) rsv - for the truncate reservation, which we will steal from the
8903          * transaction reservation.
8904          * 3) fs_info->trans_block_rsv - this will have 1 item's worth left for
8905          * updating the inode.
8906          */
8907         rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
8908         if (!rsv)
8909                 return -ENOMEM;
8910         rsv->size = min_size;
8911         rsv->failfast = 1;
8912
8913         /*
8914          * 1 for the truncate slack space
8915          * 1 for updating the inode.
8916          */
8917         trans = btrfs_start_transaction(root, 2);
8918         if (IS_ERR(trans)) {
8919                 err = PTR_ERR(trans);
8920                 goto out;
8921         }
8922
8923         /* Migrate the slack space for the truncate to our reserve */
8924         ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
8925                                       min_size);
8926         BUG_ON(ret);
8927
8928         /*
8929          * So if we truncate and then write and fsync we normally would just
8930          * write the extents that changed, which is a problem if we need to
8931          * first truncate that entire inode.  So set this flag so we write out
8932          * all of the extents in the inode to the sync log so we're completely
8933          * safe.
8934          */
8935         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
8936         trans->block_rsv = rsv;
8937
8938         while (1) {
8939                 ret = btrfs_truncate_inode_items(trans, root, inode,
8940                                                  inode->i_size,
8941                                                  BTRFS_EXTENT_DATA_KEY);
8942                 if (ret != -ENOSPC && ret != -EAGAIN) {
8943                         err = ret;
8944                         break;
8945                 }
8946
8947                 trans->block_rsv = &root->fs_info->trans_block_rsv;
8948                 ret = btrfs_update_inode(trans, root, inode);
8949                 if (ret) {
8950                         err = ret;
8951                         break;
8952                 }
8953
8954                 btrfs_end_transaction(trans, root);
8955                 btrfs_btree_balance_dirty(root);
8956
8957                 trans = btrfs_start_transaction(root, 2);
8958                 if (IS_ERR(trans)) {
8959                         ret = err = PTR_ERR(trans);
8960                         trans = NULL;
8961                         break;
8962                 }
8963
8964                 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
8965                                               rsv, min_size);
8966                 BUG_ON(ret);    /* shouldn't happen */
8967                 trans->block_rsv = rsv;
8968         }
8969
8970         if (ret == 0 && inode->i_nlink > 0) {
8971                 trans->block_rsv = root->orphan_block_rsv;
8972                 ret = btrfs_orphan_del(trans, inode);
8973                 if (ret)
8974                         err = ret;
8975         }
8976
8977         if (trans) {
8978                 trans->block_rsv = &root->fs_info->trans_block_rsv;
8979                 ret = btrfs_update_inode(trans, root, inode);
8980                 if (ret && !err)
8981                         err = ret;
8982
8983                 ret = btrfs_end_transaction(trans, root);
8984                 btrfs_btree_balance_dirty(root);
8985         }
8986
8987 out:
8988         btrfs_free_block_rsv(root, rsv);
8989
8990         if (ret && !err)
8991                 err = ret;
8992
8993         return err;
8994 }
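/*
 * Condensed, the truncate loop above has the classic restartable shape:
 * refill a private reservation, chip away at the items, and whenever the
 * reservation runs dry end the transaction and start a fresh one.  A
 * sketch with error handling elided:
 *
 *	while (1) {
 *		ret = btrfs_truncate_inode_items(trans, root, inode, ...);
 *		if (ret != -ENOSPC && ret != -EAGAIN)
 *			break;
 *		btrfs_update_inode(trans, root, inode);
 *		btrfs_end_transaction(trans, root);
 *		trans = btrfs_start_transaction(root, 2);
 *		btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
 *					rsv, min_size);
 *	}
 */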
8995
8996 /*
8997  * create a new subvolume directory/inode (helper for the ioctl).
8998  */
8999 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
9000                              struct btrfs_root *new_root,
9001                              struct btrfs_root *parent_root,
9002                              u64 new_dirid)
9003 {
9004         struct inode *inode;
9005         int err;
9006         u64 index = 0;
9007
9008         inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
9009                                 new_dirid, new_dirid,
9010                                 S_IFDIR | (~current_umask() & S_IRWXUGO),
9011                                 &index);
9012         if (IS_ERR(inode))
9013                 return PTR_ERR(inode);
9014         inode->i_op = &btrfs_dir_inode_operations;
9015         inode->i_fop = &btrfs_dir_file_operations;
9016
9017         set_nlink(inode, 1);
9018         btrfs_i_size_write(inode, 0);
9019         unlock_new_inode(inode);
9020
9021         err = btrfs_subvol_inherit_props(trans, new_root, parent_root);
9022         if (err)
9023                 btrfs_err(new_root->fs_info,
9024                           "error inheriting subvolume %llu properties: %d",
9025                           new_root->root_key.objectid, err);
9026
9027         err = btrfs_update_inode(trans, new_root, inode);
9028
9029         iput(inode);
9030         return err;
9031 }
9032
9033 struct inode *btrfs_alloc_inode(struct super_block *sb)
9034 {
9035         struct btrfs_inode *ei;
9036         struct inode *inode;
9037
9038         ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
9039         if (!ei)
9040                 return NULL;
9041
9042         ei->root = NULL;
9043         ei->generation = 0;
9044         ei->last_trans = 0;
9045         ei->last_sub_trans = 0;
9046         ei->logged_trans = 0;
9047         ei->delalloc_bytes = 0;
9048         ei->defrag_bytes = 0;
9049         ei->disk_i_size = 0;
9050         ei->flags = 0;
9051         ei->csum_bytes = 0;
9052         ei->index_cnt = (u64)-1;
9053         ei->dir_index = 0;
9054         ei->last_unlink_trans = 0;
9055         ei->last_log_commit = 0;
9056
9057         spin_lock_init(&ei->lock);
9058         ei->outstanding_extents = 0;
9059         ei->reserved_extents = 0;
9060
9061         ei->runtime_flags = 0;
9062         ei->force_compress = BTRFS_COMPRESS_NONE;
9063
9064         ei->delayed_node = NULL;
9065
9066         ei->i_otime.tv_sec = 0;
9067         ei->i_otime.tv_nsec = 0;
9068
9069         inode = &ei->vfs_inode;
9070         extent_map_tree_init(&ei->extent_tree);
9071         extent_io_tree_init(&ei->io_tree, &inode->i_data);
9072         extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
9073         ei->io_tree.track_uptodate = 1;
9074         ei->io_failure_tree.track_uptodate = 1;
9075         atomic_set(&ei->sync_writers, 0);
9076         mutex_init(&ei->log_mutex);
9077         mutex_init(&ei->delalloc_mutex);
9078         btrfs_ordered_inode_tree_init(&ei->ordered_tree);
9079         INIT_LIST_HEAD(&ei->delalloc_inodes);
9080         RB_CLEAR_NODE(&ei->rb_node);
9081
9082         return inode;
9083 }
9084
9085 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
9086 void btrfs_test_destroy_inode(struct inode *inode)
9087 {
9088         btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
9089         kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
9090 }
9091 #endif
9092
9093 static void btrfs_i_callback(struct rcu_head *head)
9094 {
9095         struct inode *inode = container_of(head, struct inode, i_rcu);
9096         kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
9097 }
9098
9099 void btrfs_destroy_inode(struct inode *inode)
9100 {
9101         struct btrfs_ordered_extent *ordered;
9102         struct btrfs_root *root = BTRFS_I(inode)->root;
9103
9104         WARN_ON(!hlist_empty(&inode->i_dentry));
9105         WARN_ON(inode->i_data.nrpages);
9106         WARN_ON(BTRFS_I(inode)->outstanding_extents);
9107         WARN_ON(BTRFS_I(inode)->reserved_extents);
9108         WARN_ON(BTRFS_I(inode)->delalloc_bytes);
9109         WARN_ON(BTRFS_I(inode)->csum_bytes);
9110         WARN_ON(BTRFS_I(inode)->defrag_bytes);
9111
9112         /*
9113          * This can happen when we create an inode, but somebody else also
9114          * created the same inode and we need to destroy the one we already
9115          * created.
9116          */
9117         if (!root)
9118                 goto free;
9119
9120         if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
9121                      &BTRFS_I(inode)->runtime_flags)) {
9122                 btrfs_info(root->fs_info, "inode %llu still on the orphan list",
9123                         btrfs_ino(inode));
9124                 atomic_dec(&root->orphan_inodes);
9125         }
9126
9127         while (1) {
9128                 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
9129                 if (!ordered)
9130                         break;
9131                 else {
9132                         btrfs_err(root->fs_info, "found ordered extent %llu %llu on inode cleanup",
9133                                 ordered->file_offset, ordered->len);
9134                         btrfs_remove_ordered_extent(inode, ordered);
9135                         btrfs_put_ordered_extent(ordered);
9136                         btrfs_put_ordered_extent(ordered);
9137                 }
9138         }
9139         btrfs_qgroup_check_reserved_leak(inode);
9140         inode_tree_del(inode);
9141         btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
9142 free:
9143         call_rcu(&inode->i_rcu, btrfs_i_callback);
9144 }
9145
9146 int btrfs_drop_inode(struct inode *inode)
9147 {
9148         struct btrfs_root *root = BTRFS_I(inode)->root;
9149
9150         if (root == NULL)
9151                 return 1;
9152
9153         /* the snap/subvol tree is being deleted */
9154         if (btrfs_root_refs(&root->root_item) == 0)
9155                 return 1;
9156         else
9157                 return generic_drop_inode(inode);
9158 }
9159
9160 static void init_once(void *foo)
9161 {
9162         struct btrfs_inode *ei = (struct btrfs_inode *) foo;
9163
9164         inode_init_once(&ei->vfs_inode);
9165 }
9166
9167 void btrfs_destroy_cachep(void)
9168 {
9169         /*
9170          * Make sure all delayed rcu free inodes are flushed before we
9171          * destroy the cache.
9172          */
9173         rcu_barrier();
9174         if (btrfs_inode_cachep)
9175                 kmem_cache_destroy(btrfs_inode_cachep);
9176         if (btrfs_trans_handle_cachep)
9177                 kmem_cache_destroy(btrfs_trans_handle_cachep);
9178         if (btrfs_transaction_cachep)
9179                 kmem_cache_destroy(btrfs_transaction_cachep);
9180         if (btrfs_path_cachep)
9181                 kmem_cache_destroy(btrfs_path_cachep);
9182         if (btrfs_free_space_cachep)
9183                 kmem_cache_destroy(btrfs_free_space_cachep);
9184         if (btrfs_delalloc_work_cachep)
9185                 kmem_cache_destroy(btrfs_delalloc_work_cachep);
9186 }
9187
9188 int btrfs_init_cachep(void)
9189 {
9190         btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
9191                         sizeof(struct btrfs_inode), 0,
9192                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
9193         if (!btrfs_inode_cachep)
9194                 goto fail;
9195
9196         btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
9197                         sizeof(struct btrfs_trans_handle), 0,
9198                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
9199         if (!btrfs_trans_handle_cachep)
9200                 goto fail;
9201
9202         btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction",
9203                         sizeof(struct btrfs_transaction), 0,
9204                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
9205         if (!btrfs_transaction_cachep)
9206                 goto fail;
9207
9208         btrfs_path_cachep = kmem_cache_create("btrfs_path",
9209                         sizeof(struct btrfs_path), 0,
9210                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
9211         if (!btrfs_path_cachep)
9212                 goto fail;
9213
9214         btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
9215                         sizeof(struct btrfs_free_space), 0,
9216                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
9217         if (!btrfs_free_space_cachep)
9218                 goto fail;
9219
9220         btrfs_delalloc_work_cachep = kmem_cache_create("btrfs_delalloc_work",
9221                         sizeof(struct btrfs_delalloc_work), 0,
9222                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
9223                         NULL);
9224         if (!btrfs_delalloc_work_cachep)
9225                 goto fail;
9226
9227         return 0;
9228 fail:
9229         btrfs_destroy_cachep();
9230         return -ENOMEM;
9231 }
9232
9233 static int btrfs_getattr(struct vfsmount *mnt,
9234                          struct dentry *dentry, struct kstat *stat)
9235 {
9236         u64 delalloc_bytes;
9237         struct inode *inode = d_inode(dentry);
9238         u32 blocksize = inode->i_sb->s_blocksize;
9239
9240         generic_fillattr(inode, stat);
9241         stat->dev = BTRFS_I(inode)->root->anon_dev;
9242         stat->blksize = PAGE_CACHE_SIZE;
9243
9244         spin_lock(&BTRFS_I(inode)->lock);
9245         delalloc_bytes = BTRFS_I(inode)->delalloc_bytes;
9246         spin_unlock(&BTRFS_I(inode)->lock);
9247         stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
9248                         ALIGN(delalloc_bytes, blocksize)) >> 9;
9249         return 0;
9250 }
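/*
 * st_blocks is reported in 512-byte units and, notably, includes
 * delalloc that hasn't reached disk yet.  A worked example, assuming a
 * 4096-byte blocksize:
 *
 *	inode_get_bytes() = 6000, delalloc_bytes = 3000
 *	ALIGN(6000, 4096) + ALIGN(3000, 4096) = 8192 + 4096 = 12288
 *	12288 >> 9 = 24 blocks
 */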
9251
9252 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
9253                            struct inode *new_dir, struct dentry *new_dentry)
9254 {
9255         struct btrfs_trans_handle *trans;
9256         struct btrfs_root *root = BTRFS_I(old_dir)->root;
9257         struct btrfs_root *dest = BTRFS_I(new_dir)->root;
9258         struct inode *new_inode = d_inode(new_dentry);
9259         struct inode *old_inode = d_inode(old_dentry);
9260         struct timespec ctime = CURRENT_TIME;
9261         u64 index = 0;
9262         u64 root_objectid;
9263         int ret;
9264         u64 old_ino = btrfs_ino(old_inode);
9265
9266         if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
9267                 return -EPERM;
9268
9269         /* we only allow renaming a subvolume link between subvolumes */
9270         if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
9271                 return -EXDEV;
9272
9273         if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
9274             (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
9275                 return -ENOTEMPTY;
9276
9277         if (S_ISDIR(old_inode->i_mode) && new_inode &&
9278             new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
9279                 return -ENOTEMPTY;
9280
9281
9282         /* check for collisions, even if the name isn't there */
9283         ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
9284                              new_dentry->d_name.name,
9285                              new_dentry->d_name.len);
9286
9287         if (ret) {
9288                 if (ret == -EEXIST) {
9289                         /* we shouldn't get
9290                          * -EEXIST without a new_inode */
9291                         if (WARN_ON(!new_inode)) {
9292                                 return ret;
9293                         }
9294                 } else {
9295                         /* maybe -EOVERFLOW */
9296                         return ret;
9297                 }
9298         }
9299         ret = 0;
9300
9301         /*
9302          * we're using rename to replace one file with another.  Start IO on it
9303          * now so we don't add too much work to the end of the transaction.
9304          */
9305         if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
9306                 filemap_flush(old_inode->i_mapping);
9307
9308         /* close the racy window with snapshot create/destroy ioctl */
9309         if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9310                 down_read(&root->fs_info->subvol_sem);
9311         /*
9312          * We want to reserve the absolute worst case amount of items.  So if
9313          * both inodes are subvols and we need to unlink them then that would
9314          * require 4 item modifications, but if they are both normal inodes it
9315          * would require 5 item modifications, so we'll assume they're normal
9316          * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
9317          * should cover the worst case number of items we'll modify.
9318          */
9319         trans = btrfs_start_transaction(root, 11);
9320         if (IS_ERR(trans)) {
9321                 ret = PTR_ERR(trans);
9322                 goto out_notrans;
9323         }
9324
9325         if (dest != root)
9326                 btrfs_record_root_in_trans(trans, dest);
9327
9328         ret = btrfs_set_inode_index(new_dir, &index);
9329         if (ret)
9330                 goto out_fail;
9331
9332         BTRFS_I(old_inode)->dir_index = 0ULL;
9333         if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9334                 /* force a full log commit if a subvolume is involved */
9335                 btrfs_set_log_full_commit(root->fs_info, trans);
9336         } else {
9337                 ret = btrfs_insert_inode_ref(trans, dest,
                                             new_dentry->d_name.name,
                                             new_dentry->d_name.len,
                                             old_ino,
                                             btrfs_ino(new_dir), index);
                if (ret)
                        goto out_fail;
                /*
                 * this is an ugly little race, but the rename is required
                 * to make sure that if we crash, the inode is either at the
                 * old name or the new one.  pinning the log transaction lets
                 * us make sure we don't allow a log commit to come in after
                 * we unlink the name but before we add the new name back in.
                 */
                btrfs_pin_log_trans(root);
        }

        inode_inc_iversion(old_dir);
        inode_inc_iversion(new_dir);
        inode_inc_iversion(old_inode);
        old_dir->i_ctime = old_dir->i_mtime = ctime;
        new_dir->i_ctime = new_dir->i_mtime = ctime;
        old_inode->i_ctime = ctime;

        if (old_dentry->d_parent != new_dentry->d_parent)
                btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);

        if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
                root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
                ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
                                        old_dentry->d_name.name,
                                        old_dentry->d_name.len);
        } else {
                ret = __btrfs_unlink_inode(trans, root, old_dir,
                                        d_inode(old_dentry),
                                        old_dentry->d_name.name,
                                        old_dentry->d_name.len);
                if (!ret)
                        ret = btrfs_update_inode(trans, root, old_inode);
        }
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto out_fail;
        }

        if (new_inode) {
                inode_inc_iversion(new_inode);
                new_inode->i_ctime = CURRENT_TIME;
                if (unlikely(btrfs_ino(new_inode) ==
                             BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
                        root_objectid = BTRFS_I(new_inode)->location.objectid;
                        ret = btrfs_unlink_subvol(trans, dest, new_dir,
                                                root_objectid,
                                                new_dentry->d_name.name,
                                                new_dentry->d_name.len);
                        BUG_ON(new_inode->i_nlink == 0);
                } else {
                        ret = btrfs_unlink_inode(trans, dest, new_dir,
                                                 d_inode(new_dentry),
                                                 new_dentry->d_name.name,
                                                 new_dentry->d_name.len);
                }
                if (!ret && new_inode->i_nlink == 0)
                        ret = btrfs_orphan_add(trans, d_inode(new_dentry));
                if (ret) {
                        btrfs_abort_transaction(trans, root, ret);
                        goto out_fail;
                }
        }

        ret = btrfs_add_link(trans, new_dir, old_inode,
                             new_dentry->d_name.name,
                             new_dentry->d_name.len, 0, index);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto out_fail;
        }

        if (old_inode->i_nlink == 1)
                BTRFS_I(old_inode)->dir_index = index;

        if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
                struct dentry *parent = new_dentry->d_parent;
                btrfs_log_new_name(trans, old_inode, old_dir, parent);
                btrfs_end_log_trans(root);
        }
out_fail:
        btrfs_end_transaction(trans, root);
out_notrans:
        if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
                up_read(&root->fs_info->subvol_sem);

        return ret;
}

static int btrfs_rename2(struct inode *old_dir, struct dentry *old_dentry,
                         struct inode *new_dir, struct dentry *new_dentry,
                         unsigned int flags)
{
        if (flags & ~RENAME_NOREPLACE)
                return -EINVAL;

        return btrfs_rename(old_dir, old_dentry, new_dir, new_dentry);
}
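
/*
 * Editor's note (illustrative, not part of the original file): only plain
 * renames and RENAME_NOREPLACE ever reach btrfs_rename(); everything else
 * is rejected before a transaction is started.  From user space:
 *
 *      renameat2(AT_FDCWD, "a", AT_FDCWD, "b", RENAME_NOREPLACE);
 *              // supported; the VFS itself verifies that "b" does not
 *              // exist before calling ->rename2(), so no extra work is
 *              // needed in the filesystem beyond accepting the flag
 *      renameat2(AT_FDCWD, "a", AT_FDCWD, "b", RENAME_EXCHANGE);
 *              // -EINVAL from the flags check above
 */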

static void btrfs_run_delalloc_work(struct btrfs_work *work)
{
        struct btrfs_delalloc_work *delalloc_work;
        struct inode *inode;

        delalloc_work = container_of(work, struct btrfs_delalloc_work,
                                     work);
        inode = delalloc_work->inode;
        filemap_flush(inode->i_mapping);
        if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
                                &BTRFS_I(inode)->runtime_flags))
                filemap_flush(inode->i_mapping);

        if (delalloc_work->delay_iput)
                btrfs_add_delayed_iput(inode);
        else
                iput(inode);
        complete(&delalloc_work->completion);
}

struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
                                                    int delay_iput)
{
        struct btrfs_delalloc_work *work;

        work = kmem_cache_zalloc(btrfs_delalloc_work_cachep, GFP_NOFS);
        if (!work)
                return NULL;

        init_completion(&work->completion);
        INIT_LIST_HEAD(&work->list);
        work->inode = inode;
        work->delay_iput = delay_iput;
        WARN_ON_ONCE(!inode);
        btrfs_init_work(&work->work, btrfs_flush_delalloc_helper,
                        btrfs_run_delalloc_work, NULL, NULL);

        return work;
}

void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)
{
        wait_for_completion(&work->completion);
        kmem_cache_free(btrfs_delalloc_work_cachep, work);
}
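
/*
 * Usage sketch (editor's illustration, not upstream code): the intended
 * life cycle of a delalloc work item, mirroring __start_delalloc_inodes()
 * below.  The caller must hold an inode reference (e.g. via igrab()); the
 * worker drops it with iput() or a delayed iput once the flush is done.
 *
 *      struct btrfs_delalloc_work *work;
 *
 *      work = btrfs_alloc_delalloc_work(inode, 0);
 *      if (!work)
 *              return -ENOMEM;
 *      btrfs_queue_work(root->fs_info->flush_workers, &work->work);
 *      ...
 *      btrfs_wait_and_free_delalloc_work(work); // blocks until the flush ran
 */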

/*
 * some fairly slow code that needs optimization. This walks the list
 * of all the inodes with pending delalloc and forces them to disk.
 */
static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput,
                                   int nr)
{
        struct btrfs_inode *binode;
        struct inode *inode;
        struct btrfs_delalloc_work *work, *next;
        struct list_head works;
        struct list_head splice;
        int ret = 0;

        INIT_LIST_HEAD(&works);
        INIT_LIST_HEAD(&splice);

        mutex_lock(&root->delalloc_mutex);
        spin_lock(&root->delalloc_lock);
        list_splice_init(&root->delalloc_inodes, &splice);
        while (!list_empty(&splice)) {
                binode = list_entry(splice.next, struct btrfs_inode,
                                    delalloc_inodes);

                list_move_tail(&binode->delalloc_inodes,
                               &root->delalloc_inodes);
                inode = igrab(&binode->vfs_inode);
                if (!inode) {
                        cond_resched_lock(&root->delalloc_lock);
                        continue;
                }
                spin_unlock(&root->delalloc_lock);

                work = btrfs_alloc_delalloc_work(inode, delay_iput);
                if (!work) {
                        if (delay_iput)
                                btrfs_add_delayed_iput(inode);
                        else
                                iput(inode);
                        ret = -ENOMEM;
                        goto out;
                }
                list_add_tail(&work->list, &works);
                btrfs_queue_work(root->fs_info->flush_workers,
                                 &work->work);
                ret++;
                if (nr != -1 && ret >= nr)
                        goto out;
                cond_resched();
                spin_lock(&root->delalloc_lock);
        }
        spin_unlock(&root->delalloc_lock);

out:
        list_for_each_entry_safe(work, next, &works, list) {
                list_del_init(&work->list);
                btrfs_wait_and_free_delalloc_work(work);
        }

        if (!list_empty_careful(&splice)) {
                spin_lock(&root->delalloc_lock);
                list_splice_tail(&splice, &root->delalloc_inodes);
                spin_unlock(&root->delalloc_lock);
        }
        mutex_unlock(&root->delalloc_mutex);
        return ret;
}

int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
{
        int ret;

        if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
                return -EROFS;

        ret = __start_delalloc_inodes(root, delay_iput, -1);
        if (ret > 0)
                ret = 0;
        /*
         * the filemap_flush will queue IO into the worker threads, but
         * we have to make sure the IO is actually started and that
         * ordered extents get created before we return
         */
        atomic_inc(&root->fs_info->async_submit_draining);
        while (atomic_read(&root->fs_info->nr_async_submits) ||
              atomic_read(&root->fs_info->async_delalloc_pages)) {
                wait_event(root->fs_info->async_submit_wait,
                   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
                    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
        }
        atomic_dec(&root->fs_info->async_submit_draining);
        return ret;
}
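
/*
 * Editor's note (illustrative): a typical caller just wants "flush all
 * delalloc for this root and wait until ordered extents exist":
 *
 *      ret = btrfs_start_delalloc_inodes(root, 0);     // 0 == plain iput()
 *
 * The async_submit_draining increment above is, as far as the editor can
 * tell from this file alone, a signal to the async submission paths that a
 * drainer is sleeping on async_submit_wait, so wakeups are not skipped
 * while compressed/async pages are still in flight.
 */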

int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput,
                               int nr)
{
        struct btrfs_root *root;
        struct list_head splice;
        int ret;

        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
                return -EROFS;

        INIT_LIST_HEAD(&splice);

        mutex_lock(&fs_info->delalloc_root_mutex);
        spin_lock(&fs_info->delalloc_root_lock);
        list_splice_init(&fs_info->delalloc_roots, &splice);
        while (!list_empty(&splice) && nr) {
                root = list_first_entry(&splice, struct btrfs_root,
                                        delalloc_root);
                root = btrfs_grab_fs_root(root);
                BUG_ON(!root);
                list_move_tail(&root->delalloc_root,
                               &fs_info->delalloc_roots);
                spin_unlock(&fs_info->delalloc_root_lock);

                ret = __start_delalloc_inodes(root, delay_iput, nr);
                btrfs_put_fs_root(root);
                if (ret < 0)
                        goto out;

                if (nr != -1) {
                        nr -= ret;
                        WARN_ON(nr < 0);
                }
                spin_lock(&fs_info->delalloc_root_lock);
        }
        spin_unlock(&fs_info->delalloc_root_lock);

        ret = 0;
        atomic_inc(&fs_info->async_submit_draining);
        while (atomic_read(&fs_info->nr_async_submits) ||
              atomic_read(&fs_info->async_delalloc_pages)) {
                wait_event(fs_info->async_submit_wait,
                   (atomic_read(&fs_info->nr_async_submits) == 0 &&
                    atomic_read(&fs_info->async_delalloc_pages) == 0));
        }
        atomic_dec(&fs_info->async_submit_draining);
out:
        if (!list_empty_careful(&splice)) {
                spin_lock(&fs_info->delalloc_root_lock);
                list_splice_tail(&splice, &fs_info->delalloc_roots);
                spin_unlock(&fs_info->delalloc_root_lock);
        }
        mutex_unlock(&fs_info->delalloc_root_mutex);
        return ret;
}
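
/*
 * Editor's note (illustrative): nr caps the total number of inodes flushed
 * across all roots, and -1 means "no limit"; that convention follows from
 * the nr checks in __start_delalloc_inodes() above.  Hypothetical calls:
 *
 *      btrfs_start_delalloc_roots(fs_info, 0, 64);     // bounded progress
 *      btrfs_start_delalloc_roots(fs_info, 0, -1);     // flush everything
 */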

static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
                         const char *symname)
{
        struct btrfs_trans_handle *trans;
        struct btrfs_root *root = BTRFS_I(dir)->root;
        struct btrfs_path *path;
        struct btrfs_key key;
        struct inode *inode = NULL;
        int err;
        int drop_inode = 0;
        u64 objectid;
        u64 index = 0;
        int name_len;
        int datasize;
        unsigned long ptr;
        struct btrfs_file_extent_item *ei;
        struct extent_buffer *leaf;

        name_len = strlen(symname);
        if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
                return -ENAMETOOLONG;

        /*
         * 2 items for inode item and ref
         * 2 items for dir items
         * 1 item for updating parent inode item
         * 1 item for the inline extent item
         * 1 item for xattr if selinux is on
         * i.e. 7 units in total, matching the reservation below
         */
        trans = btrfs_start_transaction(root, 7);
        if (IS_ERR(trans))
                return PTR_ERR(trans);

        err = btrfs_find_free_ino(root, &objectid);
        if (err)
                goto out_unlock;

        inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
                                dentry->d_name.len, btrfs_ino(dir), objectid,
                                S_IFLNK|S_IRWXUGO, &index);
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
                goto out_unlock;
        }

        /*
         * If the active LSM wants to access the inode during
         * d_instantiate it needs these. Smack checks to see
         * if the filesystem supports xattrs by looking at the
         * ops vector.
         */
        inode->i_fop = &btrfs_file_operations;
        inode->i_op = &btrfs_file_inode_operations;
        inode->i_mapping->a_ops = &btrfs_aops;
        BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;

        err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
        if (err)
                goto out_unlock_inode;

        path = btrfs_alloc_path();
        if (!path) {
                err = -ENOMEM;
                goto out_unlock_inode;
        }
        key.objectid = btrfs_ino(inode);
        key.offset = 0;
        key.type = BTRFS_EXTENT_DATA_KEY;
        datasize = btrfs_file_extent_calc_inline_size(name_len);
        err = btrfs_insert_empty_item(trans, root, path, &key,
                                      datasize);
        if (err) {
                btrfs_free_path(path);
                goto out_unlock_inode;
        }
        leaf = path->nodes[0];
        ei = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        btrfs_set_file_extent_generation(leaf, ei, trans->transid);
        btrfs_set_file_extent_type(leaf, ei,
                                   BTRFS_FILE_EXTENT_INLINE);
        btrfs_set_file_extent_encryption(leaf, ei, 0);
        btrfs_set_file_extent_compression(leaf, ei, 0);
        btrfs_set_file_extent_other_encoding(leaf, ei, 0);
        btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);

        ptr = btrfs_file_extent_inline_start(ei);
        write_extent_buffer(leaf, symname, ptr, name_len);
        btrfs_mark_buffer_dirty(leaf);
        btrfs_free_path(path);

        inode->i_op = &btrfs_symlink_inode_operations;
        inode->i_mapping->a_ops = &btrfs_symlink_aops;
        inode_set_bytes(inode, name_len);
        btrfs_i_size_write(inode, name_len);
        err = btrfs_update_inode(trans, root, inode);
        /*
         * Last step, add directory indexes for our symlink inode. This is the
         * last step to avoid extra cleanup of these indexes if an error happens
         * elsewhere above.
         */
        if (!err)
                err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
        if (err) {
                drop_inode = 1;
                goto out_unlock_inode;
        }

        unlock_new_inode(inode);
        d_instantiate(dentry, inode);

out_unlock:
        btrfs_end_transaction(trans, root);
        if (drop_inode) {
                inode_dec_link_count(inode);
                iput(inode);
        }
        btrfs_btree_balance_dirty(root);
        return err;

out_unlock_inode:
        drop_inode = 1;
        unlock_new_inode(inode);
        goto out_unlock;
}
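
/*
 * Editor's worked example: the symlink target lives in an inline file
 * extent, so the leaf item inserted above is
 *
 *      datasize = btrfs_file_extent_calc_inline_size(name_len)
 *               = (inline data start offset in btrfs_file_extent_item)
 *                 + name_len
 *
 * e.g. a 10-byte target such as "/etc/fstab" needs 21 + 10 = 31 bytes of
 * item payload.  (The 21-byte header size is the editor's recollection of
 * offsetof(struct btrfs_file_extent_item, disk_bytenr), not stated in this
 * file.)  This is also why name_len is bounded by
 * BTRFS_MAX_INLINE_DATA_SIZE(root) at the top of the function.
 */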

static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
                                       u64 start, u64 num_bytes, u64 min_size,
                                       loff_t actual_len, u64 *alloc_hint,
                                       struct btrfs_trans_handle *trans)
{
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_map *em;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_key ins;
        u64 cur_offset = start;
        u64 i_size;
        u64 cur_bytes;
        u64 last_alloc = (u64)-1;
        int ret = 0;
        bool own_trans = true;

        if (trans)
                own_trans = false;
        while (num_bytes > 0) {
                if (own_trans) {
                        trans = btrfs_start_transaction(root, 3);
                        if (IS_ERR(trans)) {
                                ret = PTR_ERR(trans);
                                break;
                        }
                }

                cur_bytes = min(num_bytes, 256ULL * 1024 * 1024);
                cur_bytes = max(cur_bytes, min_size);
                /*
                 * If we are severely fragmented we could end up with really
                 * small allocations, so if the allocator is returning small
                 * chunks, let's make its job easier by only searching for
                 * chunks of that size.
                 */
                cur_bytes = min(cur_bytes, last_alloc);
                ret = btrfs_reserve_extent(root, cur_bytes, min_size, 0,
                                           *alloc_hint, &ins, 1, 0);
                if (ret) {
                        if (own_trans)
                                btrfs_end_transaction(trans, root);
                        break;
                }

                last_alloc = ins.offset;
                ret = insert_reserved_file_extent(trans, inode,
                                                  cur_offset, ins.objectid,
                                                  ins.offset, ins.offset,
                                                  ins.offset, 0, 0, 0,
                                                  BTRFS_FILE_EXTENT_PREALLOC);
                if (ret) {
                        btrfs_free_reserved_extent(root, ins.objectid,
                                                   ins.offset, 0);
                        btrfs_abort_transaction(trans, root, ret);
                        if (own_trans)
                                btrfs_end_transaction(trans, root);
                        break;
                }

                btrfs_drop_extent_cache(inode, cur_offset,
                                        cur_offset + ins.offset - 1, 0);

                em = alloc_extent_map();
                if (!em) {
                        set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
                                &BTRFS_I(inode)->runtime_flags);
                        goto next;
                }

                em->start = cur_offset;
                em->orig_start = cur_offset;
                em->len = ins.offset;
                em->block_start = ins.objectid;
                em->block_len = ins.offset;
                em->orig_block_len = ins.offset;
                em->ram_bytes = ins.offset;
                em->bdev = root->fs_info->fs_devices->latest_bdev;
                set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
                em->generation = trans->transid;

                while (1) {
                        write_lock(&em_tree->lock);
                        ret = add_extent_mapping(em_tree, em, 1);
                        write_unlock(&em_tree->lock);
                        if (ret != -EEXIST)
                                break;
                        btrfs_drop_extent_cache(inode, cur_offset,
                                                cur_offset + ins.offset - 1,
                                                0);
                }
                free_extent_map(em);
next:
                num_bytes -= ins.offset;
                cur_offset += ins.offset;
                *alloc_hint = ins.objectid + ins.offset;

                inode_inc_iversion(inode);
                inode->i_ctime = CURRENT_TIME;
                BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
                if (!(mode & FALLOC_FL_KEEP_SIZE) &&
                    (actual_len > inode->i_size) &&
                    (cur_offset > inode->i_size)) {
                        if (cur_offset > actual_len)
                                i_size = actual_len;
                        else
                                i_size = cur_offset;
                        i_size_write(inode, i_size);
                        btrfs_ordered_update_i_size(inode, i_size, NULL);
                }

                ret = btrfs_update_inode(trans, root, inode);

                if (ret) {
                        btrfs_abort_transaction(trans, root, ret);
                        if (own_trans)
                                btrfs_end_transaction(trans, root);
                        break;
                }

                if (own_trans)
                        btrfs_end_transaction(trans, root);
        }
        return ret;
}
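
/*
 * Editor's note on the own_trans pattern above (illustrative): when no
 * transaction is passed in, each loop iteration starts and ends its own
 * 3-unit transaction, so a large fallocate() never pins one transaction
 * open across the whole range; a caller that already holds a transaction
 * opts out by passing it in, and every extent then lands in that single
 * transaction.
 */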

int btrfs_prealloc_file_range(struct inode *inode, int mode,
                              u64 start, u64 num_bytes, u64 min_size,
                              loff_t actual_len, u64 *alloc_hint)
{
        return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
                                           min_size, actual_len, alloc_hint,
                                           NULL);
}

int btrfs_prealloc_file_range_trans(struct inode *inode,
                                    struct btrfs_trans_handle *trans, int mode,
                                    u64 start, u64 num_bytes, u64 min_size,
                                    loff_t actual_len, u64 *alloc_hint)
{
        return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
                                           min_size, actual_len, alloc_hint,
                                           trans);
}
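
/*
 * Usage sketch (editor's illustration, not upstream code): preallocate len
 * bytes at offset 0 and let i_size grow with it (no FALLOC_FL_KEEP_SIZE):
 *
 *      u64 alloc_hint = 0;
 *      int ret;
 *
 *      ret = btrfs_prealloc_file_range(inode, 0, 0, len,
 *                                      root->sectorsize, len, &alloc_hint);
 *
 * Using the sector size as min_size is the editor's assumption of a sane
 * lower bound; real callers choose their own minimum extent size, and
 * alloc_hint is updated so repeated calls continue where the last extent
 * ended.
 */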

static int btrfs_set_page_dirty(struct page *page)
{
        return __set_page_dirty_nobuffers(page);
}

static int btrfs_permission(struct inode *inode, int mask)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        umode_t mode = inode->i_mode;

        if (mask & MAY_WRITE &&
            (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
                if (btrfs_root_readonly(root))
                        return -EROFS;
                if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
                        return -EACCES;
        }
        return generic_permission(inode, mask);
}
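
/*
 * Editor's note (illustrative): two btrfs-specific refusals sit in front of
 * generic_permission().  For example, open(path, O_WRONLY) on a file inside
 * a read-only snapshot returns -EROFS, and the same open on an inode with
 * BTRFS_INODE_READONLY set returns -EACCES, even when the mode bits would
 * permit the write.
 */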

static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
{
        struct btrfs_trans_handle *trans;
        struct btrfs_root *root = BTRFS_I(dir)->root;
        struct inode *inode = NULL;
        u64 objectid;
        u64 index;
        int ret = 0;

        /*
         * 5 units required for adding orphan entry
         */
        trans = btrfs_start_transaction(root, 5);
        if (IS_ERR(trans))
                return PTR_ERR(trans);

        ret = btrfs_find_free_ino(root, &objectid);
        if (ret)
                goto out;

        inode = btrfs_new_inode(trans, root, dir, NULL, 0,
                                btrfs_ino(dir), objectid, mode, &index);
        if (IS_ERR(inode)) {
                ret = PTR_ERR(inode);
                inode = NULL;
                goto out;
        }

        inode->i_fop = &btrfs_file_operations;
        inode->i_op = &btrfs_file_inode_operations;

        inode->i_mapping->a_ops = &btrfs_aops;
        BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;

        ret = btrfs_init_inode_security(trans, inode, dir, NULL);
        if (ret)
                goto out_inode;

        ret = btrfs_update_inode(trans, root, inode);
        if (ret)
                goto out_inode;
        ret = btrfs_orphan_add(trans, inode);
        if (ret)
                goto out_inode;

        /*
         * We set the number of links to 0 in btrfs_new_inode(), and here we
         * set it to 1 because d_tmpfile() will issue a warning if the count
         * is 0, via:
         *
         *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
         */
        set_nlink(inode, 1);
        unlock_new_inode(inode);
        d_tmpfile(dentry, inode);
        mark_inode_dirty(inode);

out:
        btrfs_end_transaction(trans, root);
        if (ret)
                iput(inode);
        btrfs_balance_delayed_items(root);
        btrfs_btree_balance_dirty(root);
        return ret;

out_inode:
        unlock_new_inode(inode);
        goto out;
}
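
/*
 * Editor's note (illustrative): this is the O_TMPFILE path.  From user
 * space:
 *
 *      int fd = open("/mnt/btrfs", O_TMPFILE | O_RDWR, 0600);
 *
 * creates an unnamed inode that sits on the orphan list, so it is reclaimed
 * after a crash; linkat(fd, "", dirfd, "name", AT_EMPTY_PATH) can later give
 * it a name.  (That the link path then drops the orphan item is the editor's
 * understanding; the code for it is outside this section.)
 */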

/* Inspired by filemap_check_errors() */
int btrfs_inode_check_errors(struct inode *inode)
{
        int ret = 0;

        if (test_bit(AS_ENOSPC, &inode->i_mapping->flags) &&
            test_and_clear_bit(AS_ENOSPC, &inode->i_mapping->flags))
                ret = -ENOSPC;
        if (test_bit(AS_EIO, &inode->i_mapping->flags) &&
            test_and_clear_bit(AS_EIO, &inode->i_mapping->flags))
                ret = -EIO;

        return ret;
}
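
/*
 * Editor's note (illustrative): the test_bit()-then-test_and_clear_bit()
 * pairing keeps the common case cheap: the plain read avoids an atomic
 * read-modify-write when no error is pending, while the second, atomic
 * check ensures that when two callers race, only one of them consumes and
 * reports the error.  A non-zero return therefore means "report this error
 * now; it will not be reported again".
 */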

static const struct inode_operations btrfs_dir_inode_operations = {
        .getattr        = btrfs_getattr,
        .lookup         = btrfs_lookup,
        .create         = btrfs_create,
        .unlink         = btrfs_unlink,
        .link           = btrfs_link,
        .mkdir          = btrfs_mkdir,
        .rmdir          = btrfs_rmdir,
        .rename2        = btrfs_rename2,
        .symlink        = btrfs_symlink,
        .setattr        = btrfs_setattr,
        .mknod          = btrfs_mknod,
        .setxattr       = btrfs_setxattr,
        .getxattr       = btrfs_getxattr,
        .listxattr      = btrfs_listxattr,
        .removexattr    = btrfs_removexattr,
        .permission     = btrfs_permission,
        .get_acl        = btrfs_get_acl,
        .set_acl        = btrfs_set_acl,
        .update_time    = btrfs_update_time,
        .tmpfile        = btrfs_tmpfile,
};

static const struct inode_operations btrfs_dir_ro_inode_operations = {
        .lookup         = btrfs_lookup,
        .permission     = btrfs_permission,
        .get_acl        = btrfs_get_acl,
        .set_acl        = btrfs_set_acl,
        .update_time    = btrfs_update_time,
};

static const struct file_operations btrfs_dir_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
        .iterate        = btrfs_real_readdir,
        .unlocked_ioctl = btrfs_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = btrfs_ioctl,
#endif
        .release        = btrfs_release_file,
        .fsync          = btrfs_sync_file,
};

static struct extent_io_ops btrfs_extent_io_ops = {
        .fill_delalloc = run_delalloc_range,
        .submit_bio_hook = btrfs_submit_bio_hook,
        .merge_bio_hook = btrfs_merge_bio_hook,
        .readpage_end_io_hook = btrfs_readpage_end_io_hook,
        .writepage_end_io_hook = btrfs_writepage_end_io_hook,
        .writepage_start_hook = btrfs_writepage_start_hook,
        .set_bit_hook = btrfs_set_bit_hook,
        .clear_bit_hook = btrfs_clear_bit_hook,
        .merge_extent_hook = btrfs_merge_extent_hook,
        .split_extent_hook = btrfs_split_extent_hook,
};

/*
 * btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file.  They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * the btrfs bmap call would return logical addresses that aren't
 * suitable for IO and they also will change frequently as COW
 * operations happen.  So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static const struct address_space_operations btrfs_aops = {
        .readpage       = btrfs_readpage,
        .writepage      = btrfs_writepage,
        .writepages     = btrfs_writepages,
        .readpages      = btrfs_readpages,
        .direct_IO      = btrfs_direct_IO,
        .invalidatepage = btrfs_invalidatepage,
        .releasepage    = btrfs_releasepage,
        .set_page_dirty = btrfs_set_page_dirty,
        .error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations btrfs_symlink_aops = {
        .readpage       = btrfs_readpage,
        .writepage      = btrfs_writepage,
        .invalidatepage = btrfs_invalidatepage,
        .releasepage    = btrfs_releasepage,
};

static const struct inode_operations btrfs_file_inode_operations = {
        .getattr        = btrfs_getattr,
        .setattr        = btrfs_setattr,
        .setxattr       = btrfs_setxattr,
        .getxattr       = btrfs_getxattr,
        .listxattr      = btrfs_listxattr,
        .removexattr    = btrfs_removexattr,
        .permission     = btrfs_permission,
        .fiemap         = btrfs_fiemap,
        .get_acl        = btrfs_get_acl,
        .set_acl        = btrfs_set_acl,
        .update_time    = btrfs_update_time,
};

static const struct inode_operations btrfs_special_inode_operations = {
        .getattr        = btrfs_getattr,
        .setattr        = btrfs_setattr,
        .permission     = btrfs_permission,
        .setxattr       = btrfs_setxattr,
        .getxattr       = btrfs_getxattr,
        .listxattr      = btrfs_listxattr,
        .removexattr    = btrfs_removexattr,
        .get_acl        = btrfs_get_acl,
        .set_acl        = btrfs_set_acl,
        .update_time    = btrfs_update_time,
};

static const struct inode_operations btrfs_symlink_inode_operations = {
        .readlink       = generic_readlink,
        .follow_link    = page_follow_link_light,
        .put_link       = page_put_link,
        .getattr        = btrfs_getattr,
        .setattr        = btrfs_setattr,
        .permission     = btrfs_permission,
        .setxattr       = btrfs_setxattr,
        .getxattr       = btrfs_getxattr,
        .listxattr      = btrfs_listxattr,
        .removexattr    = btrfs_removexattr,
        .update_time    = btrfs_update_time,
};

const struct dentry_operations btrfs_dentry_operations = {
        .d_delete       = btrfs_dentry_delete,
        .d_release      = btrfs_dentry_release,
};