2 * Copyright (C) 2012 Alexander Block. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
19 #include <linux/bsearch.h>
21 #include <linux/file.h>
22 #include <linux/sort.h>
23 #include <linux/mount.h>
24 #include <linux/xattr.h>
25 #include <linux/posix_acl_xattr.h>
26 #include <linux/radix-tree.h>
27 #include <linux/vmalloc.h>
28 #include <linux/string.h>
35 #include "btrfs_inode.h"
36 #include "transaction.h"
38 static int g_verbose = 0;
40 #define verbose_printk(...) if (g_verbose) printk(__VA_ARGS__)
43 * A fs_path is a helper to dynamically build path names with unknown size.
44 * It reallocates the internal buffer on demand.
45 * It allows fast adding of path elements on the right side (normal path) and
46 * fast adding to the left side (reversed path). A reversed path can also be
47 * unreversed if needed.
56 unsigned short buf_len:15;
57 unsigned short reversed:1;
61 * Average path length does not exceed 200 bytes, we'll have
62 * better packing in the slab and higher chance to satisfy
63 * a allocation later during send.
68 #define FS_PATH_INLINE_SIZE \
69 (sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
72 /* reused for each extent */
74 struct btrfs_root *root;
81 #define SEND_CTX_MAX_NAME_CACHE_SIZE 128
82 #define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)
85 struct file *send_filp;
91 u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
92 u64 flags; /* 'flags' member of btrfs_ioctl_send_args is u64 */
94 struct btrfs_root *send_root;
95 struct btrfs_root *parent_root;
96 struct clone_root *clone_roots;
99 /* current state of the compare_tree call */
100 struct btrfs_path *left_path;
101 struct btrfs_path *right_path;
102 struct btrfs_key *cmp_key;
105 * infos of the currently processed inode. In case of deleted inodes,
106 * these are the values from the deleted inode.
111 int cur_inode_new_gen;
112 int cur_inode_deleted;
115 u64 cur_inode_last_extent;
119 struct list_head new_refs;
120 struct list_head deleted_refs;
122 struct radix_tree_root name_cache;
123 struct list_head name_cache_list;
129 * We process inodes by their increasing order, so if before an
130 * incremental send we reverse the parent/child relationship of
131 * directories such that a directory with a lower inode number was
132 * the parent of a directory with a higher inode number, and the one
133 * becoming the new parent got renamed too, we can't rename/move the
134 * directory with lower inode number when we finish processing it - we
135 * must process the directory with higher inode number first, then
136 * rename/move it and then rename/move the directory with lower inode
137 * number. Example follows.
139 * Tree state when the first send was performed:
151 * Tree state when the second (incremental) send is performed:
160 * The sequence of steps that lead to the second state was:
162 * mv /a/b/c/d /a/b/c2/d2
163 * mv /a/b/c /a/b/c2/d2/cc
165 * "c" has lower inode number, but we can't move it (2nd mv operation)
166 * before we move "d", which has higher inode number.
168 * So we just memorize which move/rename operations must be performed
169 * later when their respective parent is processed and moved/renamed.
172 /* Indexed by parent directory inode number. */
173 struct rb_root pending_dir_moves;
176 * Reverse index, indexed by the inode number of a directory that
177 * is waiting for the move/rename of its immediate parent before its
178 * own move/rename can be performed.
180 struct rb_root waiting_dir_moves;
183 * A directory that is going to be rm'ed might have a child directory
184 * which is in the pending directory moves index above. In this case,
185 * the directory can only be removed after the move/rename of its child
186 * is performed. Example:
206 * Sequence of steps that lead to the send snapshot:
207 * rm -f /a/b/c/foo.txt
209 * mv /a/b/c/x /a/b/YY
212 * When the child is processed, its move/rename is delayed until its
213 * parent is processed (as explained above), but all other operations
214 * like update utimes, chown, chgrp, etc, are performed and the paths
215 * that it uses for those operations must use the orphanized name of
216 * its parent (the directory we're going to rm later), so we need to
217 * memorize that name.
219 * Indexed by the inode number of the directory to be deleted.
221 struct rb_root orphan_dirs;
224 struct pending_dir_move {
226 struct list_head list;
230 struct list_head update_refs;
233 struct waiting_dir_move {
237 * There might be some directory that could not be removed because it
238 * was waiting for this directory inode to be moved first. Therefore
239 * after this directory is moved, we can try to rmdir the ino rmdir_ino.
244 struct orphan_dir_info {
250 struct name_cache_entry {
251 struct list_head list;
253 * radix_tree has only 32bit entries but we need to handle 64bit inums.
254 * We use the lower 32bit of the 64bit inum to store it in the tree. If
255 * more then one inum would fall into the same entry, we use radix_list
256 * to store the additional entries. radix_list is also used to store
257 * entries where two entries have the same inum but different
260 struct list_head radix_list;
266 int need_later_update;
271 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);
273 static struct waiting_dir_move *
274 get_waiting_dir_move(struct send_ctx *sctx, u64 ino);
276 static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino);
278 static int need_send_hole(struct send_ctx *sctx)
280 return (sctx->parent_root && !sctx->cur_inode_new &&
281 !sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
282 S_ISREG(sctx->cur_inode_mode));
285 static void fs_path_reset(struct fs_path *p)
288 p->start = p->buf + p->buf_len - 1;
298 static struct fs_path *fs_path_alloc(void)
302 p = kmalloc(sizeof(*p), GFP_NOFS);
306 p->buf = p->inline_buf;
307 p->buf_len = FS_PATH_INLINE_SIZE;
312 static struct fs_path *fs_path_alloc_reversed(void)
324 static void fs_path_free(struct fs_path *p)
328 if (p->buf != p->inline_buf)
333 static int fs_path_len(struct fs_path *p)
335 return p->end - p->start;
338 static int fs_path_ensure_buf(struct fs_path *p, int len)
346 if (p->buf_len >= len)
349 path_len = p->end - p->start;
350 old_buf_len = p->buf_len;
353 * First time the inline_buf does not suffice
355 if (p->buf == p->inline_buf)
356 tmp_buf = kmalloc(len, GFP_NOFS);
358 tmp_buf = krealloc(p->buf, len, GFP_NOFS);
363 * The real size of the buffer is bigger, this will let the fast path
364 * happen most of the time
366 p->buf_len = ksize(p->buf);
369 tmp_buf = p->buf + old_buf_len - path_len - 1;
370 p->end = p->buf + p->buf_len - 1;
371 p->start = p->end - path_len;
372 memmove(p->start, tmp_buf, path_len + 1);
375 p->end = p->start + path_len;
380 static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
386 new_len = p->end - p->start + name_len;
387 if (p->start != p->end)
389 ret = fs_path_ensure_buf(p, new_len);
394 if (p->start != p->end)
396 p->start -= name_len;
397 *prepared = p->start;
399 if (p->start != p->end)
410 static int fs_path_add(struct fs_path *p, const char *name, int name_len)
415 ret = fs_path_prepare_for_add(p, name_len, &prepared);
418 memcpy(prepared, name, name_len);
424 static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
429 ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
432 memcpy(prepared, p2->start, p2->end - p2->start);
438 static int fs_path_add_from_extent_buffer(struct fs_path *p,
439 struct extent_buffer *eb,
440 unsigned long off, int len)
445 ret = fs_path_prepare_for_add(p, len, &prepared);
449 read_extent_buffer(eb, prepared, off, len);
455 static int fs_path_copy(struct fs_path *p, struct fs_path *from)
459 p->reversed = from->reversed;
462 ret = fs_path_add_path(p, from);
468 static void fs_path_unreverse(struct fs_path *p)
477 len = p->end - p->start;
479 p->end = p->start + len;
480 memmove(p->start, tmp, len + 1);
484 static struct btrfs_path *alloc_path_for_send(void)
486 struct btrfs_path *path;
488 path = btrfs_alloc_path();
491 path->search_commit_root = 1;
492 path->skip_locking = 1;
496 static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
506 ret = vfs_write(filp, (char *)buf + pos, len - pos, off);
507 /* TODO handle that correctly */
508 /*if (ret == -ERESTARTSYS) {
527 static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
529 struct btrfs_tlv_header *hdr;
530 int total_len = sizeof(*hdr) + len;
531 int left = sctx->send_max_size - sctx->send_size;
533 if (unlikely(left < total_len))
536 hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
537 hdr->tlv_type = cpu_to_le16(attr);
538 hdr->tlv_len = cpu_to_le16(len);
539 memcpy(hdr + 1, data, len);
540 sctx->send_size += total_len;
545 #define TLV_PUT_DEFINE_INT(bits) \
546 static int tlv_put_u##bits(struct send_ctx *sctx, \
547 u##bits attr, u##bits value) \
549 __le##bits __tmp = cpu_to_le##bits(value); \
550 return tlv_put(sctx, attr, &__tmp, sizeof(__tmp)); \
553 TLV_PUT_DEFINE_INT(64)
555 static int tlv_put_string(struct send_ctx *sctx, u16 attr,
556 const char *str, int len)
560 return tlv_put(sctx, attr, str, len);
563 static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
566 return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
569 static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
570 struct extent_buffer *eb,
571 struct btrfs_timespec *ts)
573 struct btrfs_timespec bts;
574 read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
575 return tlv_put(sctx, attr, &bts, sizeof(bts));
579 #define TLV_PUT(sctx, attrtype, attrlen, data) \
581 ret = tlv_put(sctx, attrtype, attrlen, data); \
583 goto tlv_put_failure; \
586 #define TLV_PUT_INT(sctx, attrtype, bits, value) \
588 ret = tlv_put_u##bits(sctx, attrtype, value); \
590 goto tlv_put_failure; \
593 #define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
594 #define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
595 #define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
596 #define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
597 #define TLV_PUT_STRING(sctx, attrtype, str, len) \
599 ret = tlv_put_string(sctx, attrtype, str, len); \
601 goto tlv_put_failure; \
603 #define TLV_PUT_PATH(sctx, attrtype, p) \
605 ret = tlv_put_string(sctx, attrtype, p->start, \
606 p->end - p->start); \
608 goto tlv_put_failure; \
610 #define TLV_PUT_UUID(sctx, attrtype, uuid) \
612 ret = tlv_put_uuid(sctx, attrtype, uuid); \
614 goto tlv_put_failure; \
616 #define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
618 ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
620 goto tlv_put_failure; \
623 static int send_header(struct send_ctx *sctx)
625 struct btrfs_stream_header hdr;
627 strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
628 hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);
630 return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
635 * For each command/item we want to send to userspace, we call this function.
637 static int begin_cmd(struct send_ctx *sctx, int cmd)
639 struct btrfs_cmd_header *hdr;
641 if (WARN_ON(!sctx->send_buf))
644 BUG_ON(sctx->send_size);
646 sctx->send_size += sizeof(*hdr);
647 hdr = (struct btrfs_cmd_header *)sctx->send_buf;
648 hdr->cmd = cpu_to_le16(cmd);
653 static int send_cmd(struct send_ctx *sctx)
656 struct btrfs_cmd_header *hdr;
659 hdr = (struct btrfs_cmd_header *)sctx->send_buf;
660 hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
663 crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
664 hdr->crc = cpu_to_le32(crc);
666 ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
669 sctx->total_send_size += sctx->send_size;
670 sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size;
677 * Sends a move instruction to user space
679 static int send_rename(struct send_ctx *sctx,
680 struct fs_path *from, struct fs_path *to)
684 verbose_printk("btrfs: send_rename %s -> %s\n", from->start, to->start);
686 ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
690 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
691 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);
693 ret = send_cmd(sctx);
701 * Sends a link instruction to user space
703 static int send_link(struct send_ctx *sctx,
704 struct fs_path *path, struct fs_path *lnk)
708 verbose_printk("btrfs: send_link %s -> %s\n", path->start, lnk->start);
710 ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
714 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
715 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);
717 ret = send_cmd(sctx);
725 * Sends an unlink instruction to user space
727 static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
731 verbose_printk("btrfs: send_unlink %s\n", path->start);
733 ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
737 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
739 ret = send_cmd(sctx);
747 * Sends a rmdir instruction to user space
749 static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
753 verbose_printk("btrfs: send_rmdir %s\n", path->start);
755 ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
759 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
761 ret = send_cmd(sctx);
769 * Helper function to retrieve some fields from an inode item.
771 static int get_inode_info(struct btrfs_root *root,
772 u64 ino, u64 *size, u64 *gen,
773 u64 *mode, u64 *uid, u64 *gid,
777 struct btrfs_inode_item *ii;
778 struct btrfs_key key;
779 struct btrfs_path *path;
781 path = alloc_path_for_send();
786 key.type = BTRFS_INODE_ITEM_KEY;
788 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
796 ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
797 struct btrfs_inode_item);
799 *size = btrfs_inode_size(path->nodes[0], ii);
801 *gen = btrfs_inode_generation(path->nodes[0], ii);
803 *mode = btrfs_inode_mode(path->nodes[0], ii);
805 *uid = btrfs_inode_uid(path->nodes[0], ii);
807 *gid = btrfs_inode_gid(path->nodes[0], ii);
809 *rdev = btrfs_inode_rdev(path->nodes[0], ii);
812 btrfs_free_path(path);
816 typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
821 * Helper function to iterate the entries in ONE btrfs_inode_ref or
822 * btrfs_inode_extref.
823 * The iterate callback may return a non zero value to stop iteration. This can
824 * be a negative value for error codes or 1 to simply stop it.
826 * path must point to the INODE_REF or INODE_EXTREF when called.
828 static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
829 struct btrfs_key *found_key, int resolve,
830 iterate_inode_ref_t iterate, void *ctx)
832 struct extent_buffer *eb = path->nodes[0];
833 struct btrfs_item *item;
834 struct btrfs_inode_ref *iref;
835 struct btrfs_inode_extref *extref;
836 struct btrfs_path *tmp_path;
840 int slot = path->slots[0];
847 unsigned long name_off;
848 unsigned long elem_size;
851 p = fs_path_alloc_reversed();
855 tmp_path = alloc_path_for_send();
862 if (found_key->type == BTRFS_INODE_REF_KEY) {
863 ptr = (unsigned long)btrfs_item_ptr(eb, slot,
864 struct btrfs_inode_ref);
865 item = btrfs_item_nr(slot);
866 total = btrfs_item_size(eb, item);
867 elem_size = sizeof(*iref);
869 ptr = btrfs_item_ptr_offset(eb, slot);
870 total = btrfs_item_size_nr(eb, slot);
871 elem_size = sizeof(*extref);
874 while (cur < total) {
877 if (found_key->type == BTRFS_INODE_REF_KEY) {
878 iref = (struct btrfs_inode_ref *)(ptr + cur);
879 name_len = btrfs_inode_ref_name_len(eb, iref);
880 name_off = (unsigned long)(iref + 1);
881 index = btrfs_inode_ref_index(eb, iref);
882 dir = found_key->offset;
884 extref = (struct btrfs_inode_extref *)(ptr + cur);
885 name_len = btrfs_inode_extref_name_len(eb, extref);
886 name_off = (unsigned long)&extref->name;
887 index = btrfs_inode_extref_index(eb, extref);
888 dir = btrfs_inode_extref_parent(eb, extref);
892 start = btrfs_ref_to_path(root, tmp_path, name_len,
896 ret = PTR_ERR(start);
899 if (start < p->buf) {
900 /* overflow , try again with larger buffer */
901 ret = fs_path_ensure_buf(p,
902 p->buf_len + p->buf - start);
905 start = btrfs_ref_to_path(root, tmp_path,
910 ret = PTR_ERR(start);
913 BUG_ON(start < p->buf);
917 ret = fs_path_add_from_extent_buffer(p, eb, name_off,
923 cur += elem_size + name_len;
924 ret = iterate(num, dir, index, p, ctx);
931 btrfs_free_path(tmp_path);
936 typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
937 const char *name, int name_len,
938 const char *data, int data_len,
942 * Helper function to iterate the entries in ONE btrfs_dir_item.
943 * The iterate callback may return a non zero value to stop iteration. This can
944 * be a negative value for error codes or 1 to simply stop it.
946 * path must point to the dir item when called.
948 static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
949 struct btrfs_key *found_key,
950 iterate_dir_item_t iterate, void *ctx)
953 struct extent_buffer *eb;
954 struct btrfs_item *item;
955 struct btrfs_dir_item *di;
956 struct btrfs_key di_key;
958 const int buf_len = PATH_MAX;
968 buf = kmalloc(buf_len, GFP_NOFS);
975 slot = path->slots[0];
976 item = btrfs_item_nr(slot);
977 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
980 total = btrfs_item_size(eb, item);
983 while (cur < total) {
984 name_len = btrfs_dir_name_len(eb, di);
985 data_len = btrfs_dir_data_len(eb, di);
986 type = btrfs_dir_type(eb, di);
987 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
992 if (name_len + data_len > buf_len) {
997 read_extent_buffer(eb, buf, (unsigned long)(di + 1),
998 name_len + data_len);
1000 len = sizeof(*di) + name_len + data_len;
1001 di = (struct btrfs_dir_item *)((char *)di + len);
1004 ret = iterate(num, &di_key, buf, name_len, buf + name_len,
1005 data_len, type, ctx);
1021 static int __copy_first_ref(int num, u64 dir, int index,
1022 struct fs_path *p, void *ctx)
1025 struct fs_path *pt = ctx;
1027 ret = fs_path_copy(pt, p);
1031 /* we want the first only */
1036 * Retrieve the first path of an inode. If an inode has more then one
1037 * ref/hardlink, this is ignored.
1039 static int get_inode_path(struct btrfs_root *root,
1040 u64 ino, struct fs_path *path)
1043 struct btrfs_key key, found_key;
1044 struct btrfs_path *p;
1046 p = alloc_path_for_send();
1050 fs_path_reset(path);
1053 key.type = BTRFS_INODE_REF_KEY;
1056 ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
1063 btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
1064 if (found_key.objectid != ino ||
1065 (found_key.type != BTRFS_INODE_REF_KEY &&
1066 found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1071 ret = iterate_inode_ref(root, p, &found_key, 1,
1072 __copy_first_ref, path);
1082 struct backref_ctx {
1083 struct send_ctx *sctx;
1085 /* number of total found references */
1089 * used for clones found in send_root. clones found behind cur_objectid
1090 * and cur_offset are not considered as allowed clones.
1095 /* may be truncated in case it's the last extent in a file */
1098 /* Just to check for bugs in backref resolving */
1102 static int __clone_root_cmp_bsearch(const void *key, const void *elt)
1104 u64 root = (u64)(uintptr_t)key;
1105 struct clone_root *cr = (struct clone_root *)elt;
1107 if (root < cr->root->objectid)
1109 if (root > cr->root->objectid)
1114 static int __clone_root_cmp_sort(const void *e1, const void *e2)
1116 struct clone_root *cr1 = (struct clone_root *)e1;
1117 struct clone_root *cr2 = (struct clone_root *)e2;
1119 if (cr1->root->objectid < cr2->root->objectid)
1121 if (cr1->root->objectid > cr2->root->objectid)
1127 * Called for every backref that is found for the current extent.
1128 * Results are collected in sctx->clone_roots->ino/offset/found_refs
1130 static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
1132 struct backref_ctx *bctx = ctx_;
1133 struct clone_root *found;
1137 /* First check if the root is in the list of accepted clone sources */
1138 found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
1139 bctx->sctx->clone_roots_cnt,
1140 sizeof(struct clone_root),
1141 __clone_root_cmp_bsearch);
1145 if (found->root == bctx->sctx->send_root &&
1146 ino == bctx->cur_objectid &&
1147 offset == bctx->cur_offset) {
1148 bctx->found_itself = 1;
1152 * There are inodes that have extents that lie behind its i_size. Don't
1153 * accept clones from these extents.
1155 ret = get_inode_info(found->root, ino, &i_size, NULL, NULL, NULL, NULL,
1160 if (offset + bctx->extent_len > i_size)
1164 * Make sure we don't consider clones from send_root that are
1165 * behind the current inode/offset.
1167 if (found->root == bctx->sctx->send_root) {
1169 * TODO for the moment we don't accept clones from the inode
1170 * that is currently send. We may change this when
1171 * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same
1174 if (ino >= bctx->cur_objectid)
1177 if (ino > bctx->cur_objectid)
1179 if (offset + bctx->extent_len > bctx->cur_offset)
1185 found->found_refs++;
1186 if (ino < found->ino) {
1188 found->offset = offset;
1189 } else if (found->ino == ino) {
1191 * same extent found more then once in the same file.
1193 if (found->offset > offset + bctx->extent_len)
1194 found->offset = offset;
1201 * Given an inode, offset and extent item, it finds a good clone for a clone
1202 * instruction. Returns -ENOENT when none could be found. The function makes
1203 * sure that the returned clone is usable at the point where sending is at the
1204 * moment. This means, that no clones are accepted which lie behind the current
1207 * path must point to the extent item when called.
1209 static int find_extent_clone(struct send_ctx *sctx,
1210 struct btrfs_path *path,
1211 u64 ino, u64 data_offset,
1213 struct clone_root **found)
1220 u64 extent_item_pos;
1222 struct btrfs_file_extent_item *fi;
1223 struct extent_buffer *eb = path->nodes[0];
1224 struct backref_ctx *backref_ctx = NULL;
1225 struct clone_root *cur_clone_root;
1226 struct btrfs_key found_key;
1227 struct btrfs_path *tmp_path;
1231 tmp_path = alloc_path_for_send();
1235 backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_NOFS);
1241 if (data_offset >= ino_size) {
1243 * There may be extents that lie behind the file's size.
1244 * I at least had this in combination with snapshotting while
1245 * writing large files.
1251 fi = btrfs_item_ptr(eb, path->slots[0],
1252 struct btrfs_file_extent_item);
1253 extent_type = btrfs_file_extent_type(eb, fi);
1254 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1258 compressed = btrfs_file_extent_compression(eb, fi);
1260 num_bytes = btrfs_file_extent_num_bytes(eb, fi);
1261 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
1262 if (disk_byte == 0) {
1266 logical = disk_byte + btrfs_file_extent_offset(eb, fi);
1268 ret = extent_from_logical(sctx->send_root->fs_info, disk_byte, tmp_path,
1269 &found_key, &flags);
1270 btrfs_release_path(tmp_path);
1274 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1280 * Setup the clone roots.
1282 for (i = 0; i < sctx->clone_roots_cnt; i++) {
1283 cur_clone_root = sctx->clone_roots + i;
1284 cur_clone_root->ino = (u64)-1;
1285 cur_clone_root->offset = 0;
1286 cur_clone_root->found_refs = 0;
1289 backref_ctx->sctx = sctx;
1290 backref_ctx->found = 0;
1291 backref_ctx->cur_objectid = ino;
1292 backref_ctx->cur_offset = data_offset;
1293 backref_ctx->found_itself = 0;
1294 backref_ctx->extent_len = num_bytes;
1297 * The last extent of a file may be too large due to page alignment.
1298 * We need to adjust extent_len in this case so that the checks in
1299 * __iterate_backrefs work.
1301 if (data_offset + num_bytes >= ino_size)
1302 backref_ctx->extent_len = ino_size - data_offset;
1305 * Now collect all backrefs.
1307 if (compressed == BTRFS_COMPRESS_NONE)
1308 extent_item_pos = logical - found_key.objectid;
1310 extent_item_pos = 0;
1311 ret = iterate_extent_inodes(sctx->send_root->fs_info,
1312 found_key.objectid, extent_item_pos, 1,
1313 __iterate_backrefs, backref_ctx);
1318 if (!backref_ctx->found_itself) {
1319 /* found a bug in backref code? */
1321 btrfs_err(sctx->send_root->fs_info, "did not find backref in "
1322 "send_root. inode=%llu, offset=%llu, "
1323 "disk_byte=%llu found extent=%llu\n",
1324 ino, data_offset, disk_byte, found_key.objectid);
1328 verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, "
1330 "num_bytes=%llu, logical=%llu\n",
1331 data_offset, ino, num_bytes, logical);
1333 if (!backref_ctx->found)
1334 verbose_printk("btrfs: no clones found\n");
1336 cur_clone_root = NULL;
1337 for (i = 0; i < sctx->clone_roots_cnt; i++) {
1338 if (sctx->clone_roots[i].found_refs) {
1339 if (!cur_clone_root)
1340 cur_clone_root = sctx->clone_roots + i;
1341 else if (sctx->clone_roots[i].root == sctx->send_root)
1342 /* prefer clones from send_root over others */
1343 cur_clone_root = sctx->clone_roots + i;
1348 if (cur_clone_root) {
1349 if (compressed != BTRFS_COMPRESS_NONE) {
1351 * Offsets given by iterate_extent_inodes() are relative
1352 * to the start of the extent, we need to add logical
1353 * offset from the file extent item.
1354 * (See why at backref.c:check_extent_in_eb())
1356 cur_clone_root->offset += btrfs_file_extent_offset(eb,
1359 *found = cur_clone_root;
1366 btrfs_free_path(tmp_path);
1371 static int read_symlink(struct btrfs_root *root,
1373 struct fs_path *dest)
1376 struct btrfs_path *path;
1377 struct btrfs_key key;
1378 struct btrfs_file_extent_item *ei;
1384 path = alloc_path_for_send();
1389 key.type = BTRFS_EXTENT_DATA_KEY;
1391 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1396 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
1397 struct btrfs_file_extent_item);
1398 type = btrfs_file_extent_type(path->nodes[0], ei);
1399 compression = btrfs_file_extent_compression(path->nodes[0], ei);
1400 BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
1401 BUG_ON(compression);
1403 off = btrfs_file_extent_inline_start(ei);
1404 len = btrfs_file_extent_inline_len(path->nodes[0], path->slots[0], ei);
1406 ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);
1409 btrfs_free_path(path);
1414 * Helper function to generate a file name that is unique in the root of
1415 * send_root and parent_root. This is used to generate names for orphan inodes.
1417 static int gen_unique_name(struct send_ctx *sctx,
1419 struct fs_path *dest)
1422 struct btrfs_path *path;
1423 struct btrfs_dir_item *di;
1428 path = alloc_path_for_send();
1433 len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
1435 ASSERT(len < sizeof(tmp));
1437 di = btrfs_lookup_dir_item(NULL, sctx->send_root,
1438 path, BTRFS_FIRST_FREE_OBJECTID,
1439 tmp, strlen(tmp), 0);
1440 btrfs_release_path(path);
1446 /* not unique, try again */
1451 if (!sctx->parent_root) {
1457 di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
1458 path, BTRFS_FIRST_FREE_OBJECTID,
1459 tmp, strlen(tmp), 0);
1460 btrfs_release_path(path);
1466 /* not unique, try again */
1474 ret = fs_path_add(dest, tmp, strlen(tmp));
1477 btrfs_free_path(path);
1482 inode_state_no_change,
1483 inode_state_will_create,
1484 inode_state_did_create,
1485 inode_state_will_delete,
1486 inode_state_did_delete,
1489 static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
1497 ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
1499 if (ret < 0 && ret != -ENOENT)
1503 if (!sctx->parent_root) {
1504 right_ret = -ENOENT;
1506 ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
1507 NULL, NULL, NULL, NULL);
1508 if (ret < 0 && ret != -ENOENT)
1513 if (!left_ret && !right_ret) {
1514 if (left_gen == gen && right_gen == gen) {
1515 ret = inode_state_no_change;
1516 } else if (left_gen == gen) {
1517 if (ino < sctx->send_progress)
1518 ret = inode_state_did_create;
1520 ret = inode_state_will_create;
1521 } else if (right_gen == gen) {
1522 if (ino < sctx->send_progress)
1523 ret = inode_state_did_delete;
1525 ret = inode_state_will_delete;
1529 } else if (!left_ret) {
1530 if (left_gen == gen) {
1531 if (ino < sctx->send_progress)
1532 ret = inode_state_did_create;
1534 ret = inode_state_will_create;
1538 } else if (!right_ret) {
1539 if (right_gen == gen) {
1540 if (ino < sctx->send_progress)
1541 ret = inode_state_did_delete;
1543 ret = inode_state_will_delete;
1555 static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
1559 ret = get_cur_inode_state(sctx, ino, gen);
1563 if (ret == inode_state_no_change ||
1564 ret == inode_state_did_create ||
1565 ret == inode_state_will_delete)
1575 * Helper function to lookup a dir item in a dir.
1577 static int lookup_dir_item_inode(struct btrfs_root *root,
1578 u64 dir, const char *name, int name_len,
1583 struct btrfs_dir_item *di;
1584 struct btrfs_key key;
1585 struct btrfs_path *path;
1587 path = alloc_path_for_send();
1591 di = btrfs_lookup_dir_item(NULL, root, path,
1592 dir, name, name_len, 0);
1601 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
1602 *found_inode = key.objectid;
1603 *found_type = btrfs_dir_type(path->nodes[0], di);
1606 btrfs_free_path(path);
1611 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir,
1612 * generation of the parent dir and the name of the dir entry.
1614 static int get_first_ref(struct btrfs_root *root, u64 ino,
1615 u64 *dir, u64 *dir_gen, struct fs_path *name)
1618 struct btrfs_key key;
1619 struct btrfs_key found_key;
1620 struct btrfs_path *path;
1624 path = alloc_path_for_send();
1629 key.type = BTRFS_INODE_REF_KEY;
1632 ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
1636 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1638 if (ret || found_key.objectid != ino ||
1639 (found_key.type != BTRFS_INODE_REF_KEY &&
1640 found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1645 if (key.type == BTRFS_INODE_REF_KEY) {
1646 struct btrfs_inode_ref *iref;
1647 iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1648 struct btrfs_inode_ref);
1649 len = btrfs_inode_ref_name_len(path->nodes[0], iref);
1650 ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1651 (unsigned long)(iref + 1),
1653 parent_dir = found_key.offset;
1655 struct btrfs_inode_extref *extref;
1656 extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1657 struct btrfs_inode_extref);
1658 len = btrfs_inode_extref_name_len(path->nodes[0], extref);
1659 ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1660 (unsigned long)&extref->name, len);
1661 parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
1665 btrfs_release_path(path);
1667 ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL, NULL,
1675 btrfs_free_path(path);
1679 static int is_first_ref(struct btrfs_root *root,
1681 const char *name, int name_len)
1684 struct fs_path *tmp_name;
1688 tmp_name = fs_path_alloc();
1692 ret = get_first_ref(root, ino, &tmp_dir, &tmp_dir_gen, tmp_name);
1696 if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {
1701 ret = !memcmp(tmp_name->start, name, name_len);
1704 fs_path_free(tmp_name);
1709 * Used by process_recorded_refs to determine if a new ref would overwrite an
1710 * already existing ref. In case it detects an overwrite, it returns the
1711 * inode/gen in who_ino/who_gen.
1712 * When an overwrite is detected, process_recorded_refs does proper orphanizing
1713 * to make sure later references to the overwritten inode are possible.
1714 * Orphanizing is however only required for the first ref of an inode.
1715 * process_recorded_refs does an additional is_first_ref check to see if
1716 * orphanizing is really required.
1718 static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
1719 const char *name, int name_len,
1720 u64 *who_ino, u64 *who_gen)
1724 u64 other_inode = 0;
1727 if (!sctx->parent_root)
1730 ret = is_inode_existent(sctx, dir, dir_gen);
1735 * If we have a parent root we need to verify that the parent dir was
1736 * not delted and then re-created, if it was then we have no overwrite
1737 * and we can just unlink this entry.
1739 if (sctx->parent_root) {
1740 ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
1742 if (ret < 0 && ret != -ENOENT)
1752 ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
1753 &other_inode, &other_type);
1754 if (ret < 0 && ret != -ENOENT)
1762 * Check if the overwritten ref was already processed. If yes, the ref
1763 * was already unlinked/moved, so we can safely assume that we will not
1764 * overwrite anything at this point in time.
1766 if (other_inode > sctx->send_progress) {
1767 ret = get_inode_info(sctx->parent_root, other_inode, NULL,
1768 who_gen, NULL, NULL, NULL, NULL);
1773 *who_ino = other_inode;
1783 * Checks if the ref was overwritten by an already processed inode. This is
1784 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
1785 * thus the orphan name needs be used.
1786 * process_recorded_refs also uses it to avoid unlinking of refs that were
1789 static int did_overwrite_ref(struct send_ctx *sctx,
1790 u64 dir, u64 dir_gen,
1791 u64 ino, u64 ino_gen,
1792 const char *name, int name_len)
1799 if (!sctx->parent_root)
1802 ret = is_inode_existent(sctx, dir, dir_gen);
1806 /* check if the ref was overwritten by another ref */
1807 ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
1808 &ow_inode, &other_type);
1809 if (ret < 0 && ret != -ENOENT)
1812 /* was never and will never be overwritten */
1817 ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
1822 if (ow_inode == ino && gen == ino_gen) {
1827 /* we know that it is or will be overwritten. check this now */
1828 if (ow_inode < sctx->send_progress)
1838 * Same as did_overwrite_ref, but also checks if it is the first ref of an inode
1839 * that got overwritten. This is used by process_recorded_refs to determine
1840 * if it has to use the path as returned by get_cur_path or the orphan name.
1842 static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
1845 struct fs_path *name = NULL;
1849 if (!sctx->parent_root)
1852 name = fs_path_alloc();
1856 ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
1860 ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
1861 name->start, fs_path_len(name));
1869 * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
1870 * so we need to do some special handling in case we have clashes. This function
1871 * takes care of this with the help of name_cache_entry::radix_list.
1872 * In case of error, nce is kfreed.
1874 static int name_cache_insert(struct send_ctx *sctx,
1875 struct name_cache_entry *nce)
1878 struct list_head *nce_head;
1880 nce_head = radix_tree_lookup(&sctx->name_cache,
1881 (unsigned long)nce->ino);
1883 nce_head = kmalloc(sizeof(*nce_head), GFP_NOFS);
1888 INIT_LIST_HEAD(nce_head);
1890 ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
1897 list_add_tail(&nce->radix_list, nce_head);
1898 list_add_tail(&nce->list, &sctx->name_cache_list);
1899 sctx->name_cache_size++;
1904 static void name_cache_delete(struct send_ctx *sctx,
1905 struct name_cache_entry *nce)
1907 struct list_head *nce_head;
1909 nce_head = radix_tree_lookup(&sctx->name_cache,
1910 (unsigned long)nce->ino);
1912 btrfs_err(sctx->send_root->fs_info,
1913 "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
1914 nce->ino, sctx->name_cache_size);
1917 list_del(&nce->radix_list);
1918 list_del(&nce->list);
1919 sctx->name_cache_size--;
1922 * We may not get to the final release of nce_head if the lookup fails
1924 if (nce_head && list_empty(nce_head)) {
1925 radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
1930 static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
1933 struct list_head *nce_head;
1934 struct name_cache_entry *cur;
1936 nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
1940 list_for_each_entry(cur, nce_head, radix_list) {
1941 if (cur->ino == ino && cur->gen == gen)
1948 * Removes the entry from the list and adds it back to the end. This marks the
1949 * entry as recently used so that name_cache_clean_unused does not remove it.
1951 static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce)
1953 list_del(&nce->list);
1954 list_add_tail(&nce->list, &sctx->name_cache_list);
1958 * Remove some entries from the beginning of name_cache_list.
1960 static void name_cache_clean_unused(struct send_ctx *sctx)
1962 struct name_cache_entry *nce;
1964 if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
1967 while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
1968 nce = list_entry(sctx->name_cache_list.next,
1969 struct name_cache_entry, list);
1970 name_cache_delete(sctx, nce);
1975 static void name_cache_free(struct send_ctx *sctx)
1977 struct name_cache_entry *nce;
1979 while (!list_empty(&sctx->name_cache_list)) {
1980 nce = list_entry(sctx->name_cache_list.next,
1981 struct name_cache_entry, list);
1982 name_cache_delete(sctx, nce);
1988 * Used by get_cur_path for each ref up to the root.
1989 * Returns 0 if it succeeded.
1990 * Returns 1 if the inode is not existent or got overwritten. In that case, the
1991 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
1992 * is returned, parent_ino/parent_gen are not guaranteed to be valid.
1993 * Returns <0 in case of error.
1995 static int __get_cur_name_and_parent(struct send_ctx *sctx,
1999 struct fs_path *dest)
2003 struct btrfs_path *path = NULL;
2004 struct name_cache_entry *nce = NULL;
2007 * First check if we already did a call to this function with the same
2008 * ino/gen. If yes, check if the cache entry is still up-to-date. If yes
2009 * return the cached result.
2011 nce = name_cache_search(sctx, ino, gen);
2013 if (ino < sctx->send_progress && nce->need_later_update) {
2014 name_cache_delete(sctx, nce);
2018 name_cache_used(sctx, nce);
2019 *parent_ino = nce->parent_ino;
2020 *parent_gen = nce->parent_gen;
2021 ret = fs_path_add(dest, nce->name, nce->name_len);
2029 path = alloc_path_for_send();
2034 * If the inode is not existent yet, add the orphan name and return 1.
2035 * This should only happen for the parent dir that we determine in
2038 ret = is_inode_existent(sctx, ino, gen);
2043 ret = gen_unique_name(sctx, ino, gen, dest);
2051 * Depending on whether the inode was already processed or not, use
2052 * send_root or parent_root for ref lookup.
2054 if (ino < sctx->send_progress)
2055 ret = get_first_ref(sctx->send_root, ino,
2056 parent_ino, parent_gen, dest);
2058 ret = get_first_ref(sctx->parent_root, ino,
2059 parent_ino, parent_gen, dest);
2064 * Check if the ref was overwritten by an inode's ref that was processed
2065 * earlier. If yes, treat as orphan and return 1.
2067 ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
2068 dest->start, dest->end - dest->start);
2072 fs_path_reset(dest);
2073 ret = gen_unique_name(sctx, ino, gen, dest);
2081 * Store the result of the lookup in the name cache.
2083 nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_NOFS);
2091 nce->parent_ino = *parent_ino;
2092 nce->parent_gen = *parent_gen;
2093 nce->name_len = fs_path_len(dest);
2095 strcpy(nce->name, dest->start);
2097 if (ino < sctx->send_progress)
2098 nce->need_later_update = 0;
2100 nce->need_later_update = 1;
2102 nce_ret = name_cache_insert(sctx, nce);
2105 name_cache_clean_unused(sctx);
2108 btrfs_free_path(path);
2113 * Magic happens here. This function returns the first ref to an inode as it
2114 * would look like while receiving the stream at this point in time.
2115 * We walk the path up to the root. For every inode in between, we check if it
2116 * was already processed/sent. If yes, we continue with the parent as found
2117 * in send_root. If not, we continue with the parent as found in parent_root.
2118 * If we encounter an inode that was deleted at this point in time, we use the
2119 * inodes "orphan" name instead of the real name and stop. Same with new inodes
2120 * that were not created yet and overwritten inodes/refs.
2122 * When do we have have orphan inodes:
2123 * 1. When an inode is freshly created and thus no valid refs are available yet
2124 * 2. When a directory lost all it's refs (deleted) but still has dir items
2125 * inside which were not processed yet (pending for move/delete). If anyone
2126 * tried to get the path to the dir items, it would get a path inside that
2128 * 3. When an inode is moved around or gets new links, it may overwrite the ref
2129 * of an unprocessed inode. If in that case the first ref would be
2130 * overwritten, the overwritten inode gets "orphanized". Later when we
2131 * process this overwritten inode, it is restored at a new place by moving
2134 * sctx->send_progress tells this function at which point in time receiving
2137 static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
2138 struct fs_path *dest)
2141 struct fs_path *name = NULL;
2142 u64 parent_inode = 0;
2146 name = fs_path_alloc();
2153 fs_path_reset(dest);
2155 while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
2156 fs_path_reset(name);
2158 if (is_waiting_for_rm(sctx, ino)) {
2159 ret = gen_unique_name(sctx, ino, gen, name);
2162 ret = fs_path_add_path(dest, name);
2166 if (is_waiting_for_move(sctx, ino)) {
2167 ret = get_first_ref(sctx->parent_root, ino,
2168 &parent_inode, &parent_gen, name);
2170 ret = __get_cur_name_and_parent(sctx, ino, gen,
2180 ret = fs_path_add_path(dest, name);
2191 fs_path_unreverse(dest);
2196 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
2198 static int send_subvol_begin(struct send_ctx *sctx)
2201 struct btrfs_root *send_root = sctx->send_root;
2202 struct btrfs_root *parent_root = sctx->parent_root;
2203 struct btrfs_path *path;
2204 struct btrfs_key key;
2205 struct btrfs_root_ref *ref;
2206 struct extent_buffer *leaf;
2210 path = btrfs_alloc_path();
2214 name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_NOFS);
2216 btrfs_free_path(path);
2220 key.objectid = send_root->objectid;
2221 key.type = BTRFS_ROOT_BACKREF_KEY;
2224 ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
2233 leaf = path->nodes[0];
2234 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2235 if (key.type != BTRFS_ROOT_BACKREF_KEY ||
2236 key.objectid != send_root->objectid) {
2240 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
2241 namelen = btrfs_root_ref_name_len(leaf, ref);
2242 read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
2243 btrfs_release_path(path);
2246 ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
2250 ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
2255 TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);
2256 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2257 sctx->send_root->root_item.uuid);
2258 TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
2259 le64_to_cpu(sctx->send_root->root_item.ctransid));
2261 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2262 sctx->parent_root->root_item.uuid);
2263 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
2264 le64_to_cpu(sctx->parent_root->root_item.ctransid));
2267 ret = send_cmd(sctx);
2271 btrfs_free_path(path);
2276 static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
2281 verbose_printk("btrfs: send_truncate %llu size=%llu\n", ino, size);
2283 p = fs_path_alloc();
2287 ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
2291 ret = get_cur_path(sctx, ino, gen, p);
2294 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2295 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);
2297 ret = send_cmd(sctx);
2305 static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
2310 verbose_printk("btrfs: send_chmod %llu mode=%llu\n", ino, mode);
2312 p = fs_path_alloc();
2316 ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
2320 ret = get_cur_path(sctx, ino, gen, p);
2323 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2324 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);
2326 ret = send_cmd(sctx);
2334 static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
2339 verbose_printk("btrfs: send_chown %llu uid=%llu, gid=%llu\n", ino, uid, gid);
2341 p = fs_path_alloc();
2345 ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
2349 ret = get_cur_path(sctx, ino, gen, p);
2352 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2353 TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
2354 TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);
2356 ret = send_cmd(sctx);
2364 static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
2367 struct fs_path *p = NULL;
2368 struct btrfs_inode_item *ii;
2369 struct btrfs_path *path = NULL;
2370 struct extent_buffer *eb;
2371 struct btrfs_key key;
2374 verbose_printk("btrfs: send_utimes %llu\n", ino);
2376 p = fs_path_alloc();
2380 path = alloc_path_for_send();
2387 key.type = BTRFS_INODE_ITEM_KEY;
2389 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2393 eb = path->nodes[0];
2394 slot = path->slots[0];
2395 ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
2397 ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
2401 ret = get_cur_path(sctx, ino, gen, p);
2404 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2405 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb,
2406 btrfs_inode_atime(ii));
2407 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb,
2408 btrfs_inode_mtime(ii));
2409 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb,
2410 btrfs_inode_ctime(ii));
2411 /* TODO Add otime support when the otime patches get into upstream */
2413 ret = send_cmd(sctx);
2418 btrfs_free_path(path);
2423 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
2424 * a valid path yet because we did not process the refs yet. So, the inode
2425 * is created as orphan.
2427 static int send_create_inode(struct send_ctx *sctx, u64 ino)
2436 verbose_printk("btrfs: send_create_inode %llu\n", ino);
2438 p = fs_path_alloc();
2442 ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode, NULL,
2447 if (S_ISREG(mode)) {
2448 cmd = BTRFS_SEND_C_MKFILE;
2449 } else if (S_ISDIR(mode)) {
2450 cmd = BTRFS_SEND_C_MKDIR;
2451 } else if (S_ISLNK(mode)) {
2452 cmd = BTRFS_SEND_C_SYMLINK;
2453 } else if (S_ISCHR(mode) || S_ISBLK(mode)) {
2454 cmd = BTRFS_SEND_C_MKNOD;
2455 } else if (S_ISFIFO(mode)) {
2456 cmd = BTRFS_SEND_C_MKFIFO;
2457 } else if (S_ISSOCK(mode)) {
2458 cmd = BTRFS_SEND_C_MKSOCK;
2460 printk(KERN_WARNING "btrfs: unexpected inode type %o",
2461 (int)(mode & S_IFMT));
2466 ret = begin_cmd(sctx, cmd);
2470 ret = gen_unique_name(sctx, ino, gen, p);
2474 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2475 TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino);
2477 if (S_ISLNK(mode)) {
2479 ret = read_symlink(sctx->send_root, ino, p);
2482 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
2483 } else if (S_ISCHR(mode) || S_ISBLK(mode) ||
2484 S_ISFIFO(mode) || S_ISSOCK(mode)) {
2485 TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
2486 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
2489 ret = send_cmd(sctx);
2501 * We need some special handling for inodes that get processed before the parent
2502 * directory got created. See process_recorded_refs for details.
2503 * This function does the check if we already created the dir out of order.
2505 static int did_create_dir(struct send_ctx *sctx, u64 dir)
2508 struct btrfs_path *path = NULL;
2509 struct btrfs_key key;
2510 struct btrfs_key found_key;
2511 struct btrfs_key di_key;
2512 struct extent_buffer *eb;
2513 struct btrfs_dir_item *di;
2516 path = alloc_path_for_send();
2523 key.type = BTRFS_DIR_INDEX_KEY;
2525 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2530 eb = path->nodes[0];
2531 slot = path->slots[0];
2532 if (slot >= btrfs_header_nritems(eb)) {
2533 ret = btrfs_next_leaf(sctx->send_root, path);
2536 } else if (ret > 0) {
2543 btrfs_item_key_to_cpu(eb, &found_key, slot);
2544 if (found_key.objectid != key.objectid ||
2545 found_key.type != key.type) {
2550 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
2551 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
2553 if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
2554 di_key.objectid < sctx->send_progress) {
2563 btrfs_free_path(path);
2568 * Only creates the inode if it is:
2569 * 1. Not a directory
2570 * 2. Or a directory which was not created already due to out of order
2571 * directories. See did_create_dir and process_recorded_refs for details.
2573 static int send_create_inode_if_needed(struct send_ctx *sctx)
2577 if (S_ISDIR(sctx->cur_inode_mode)) {
2578 ret = did_create_dir(sctx, sctx->cur_ino);
2587 ret = send_create_inode(sctx, sctx->cur_ino);
2595 struct recorded_ref {
2596 struct list_head list;
2599 struct fs_path *full_path;
2607 * We need to process new refs before deleted refs, but compare_tree gives us
2608 * everything mixed. So we first record all refs and later process them.
2609 * This function is a helper to record one ref.
2611 static int record_ref(struct list_head *head, u64 dir,
2612 u64 dir_gen, struct fs_path *path)
2614 struct recorded_ref *ref;
2616 ref = kmalloc(sizeof(*ref), GFP_NOFS);
2621 ref->dir_gen = dir_gen;
2622 ref->full_path = path;
2624 ref->name = (char *)kbasename(ref->full_path->start);
2625 ref->name_len = ref->full_path->end - ref->name;
2626 ref->dir_path = ref->full_path->start;
2627 if (ref->name == ref->full_path->start)
2628 ref->dir_path_len = 0;
2630 ref->dir_path_len = ref->full_path->end -
2631 ref->full_path->start - 1 - ref->name_len;
2633 list_add_tail(&ref->list, head);
2637 static int dup_ref(struct recorded_ref *ref, struct list_head *list)
2639 struct recorded_ref *new;
2641 new = kmalloc(sizeof(*ref), GFP_NOFS);
2645 new->dir = ref->dir;
2646 new->dir_gen = ref->dir_gen;
2647 new->full_path = NULL;
2648 INIT_LIST_HEAD(&new->list);
2649 list_add_tail(&new->list, list);
2653 static void __free_recorded_refs(struct list_head *head)
2655 struct recorded_ref *cur;
2657 while (!list_empty(head)) {
2658 cur = list_entry(head->next, struct recorded_ref, list);
2659 fs_path_free(cur->full_path);
2660 list_del(&cur->list);
2665 static void free_recorded_refs(struct send_ctx *sctx)
2667 __free_recorded_refs(&sctx->new_refs);
2668 __free_recorded_refs(&sctx->deleted_refs);
2672 * Renames/moves a file/dir to its orphan name. Used when the first
2673 * ref of an unprocessed inode gets overwritten and for all non empty
2676 static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
2677 struct fs_path *path)
2680 struct fs_path *orphan;
2682 orphan = fs_path_alloc();
2686 ret = gen_unique_name(sctx, ino, gen, orphan);
2690 ret = send_rename(sctx, path, orphan);
2693 fs_path_free(orphan);
2697 static struct orphan_dir_info *
2698 add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
2700 struct rb_node **p = &sctx->orphan_dirs.rb_node;
2701 struct rb_node *parent = NULL;
2702 struct orphan_dir_info *entry, *odi;
2704 odi = kmalloc(sizeof(*odi), GFP_NOFS);
2706 return ERR_PTR(-ENOMEM);
2712 entry = rb_entry(parent, struct orphan_dir_info, node);
2713 if (dir_ino < entry->ino) {
2715 } else if (dir_ino > entry->ino) {
2716 p = &(*p)->rb_right;
2723 rb_link_node(&odi->node, parent, p);
2724 rb_insert_color(&odi->node, &sctx->orphan_dirs);
2728 static struct orphan_dir_info *
2729 get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
2731 struct rb_node *n = sctx->orphan_dirs.rb_node;
2732 struct orphan_dir_info *entry;
2735 entry = rb_entry(n, struct orphan_dir_info, node);
2736 if (dir_ino < entry->ino)
2738 else if (dir_ino > entry->ino)
2746 static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino)
2748 struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino);
2753 static void free_orphan_dir_info(struct send_ctx *sctx,
2754 struct orphan_dir_info *odi)
2758 rb_erase(&odi->node, &sctx->orphan_dirs);
2763 * Returns 1 if a directory can be removed at this point in time.
2764 * We check this by iterating all dir items and checking if the inode behind
2765 * the dir item was already processed.
2767 static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
2771 struct btrfs_root *root = sctx->parent_root;
2772 struct btrfs_path *path;
2773 struct btrfs_key key;
2774 struct btrfs_key found_key;
2775 struct btrfs_key loc;
2776 struct btrfs_dir_item *di;
2779 * Don't try to rmdir the top/root subvolume dir.
2781 if (dir == BTRFS_FIRST_FREE_OBJECTID)
2784 path = alloc_path_for_send();
2789 key.type = BTRFS_DIR_INDEX_KEY;
2791 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2796 struct waiting_dir_move *dm;
2798 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2799 ret = btrfs_next_leaf(root, path);
2806 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2808 if (found_key.objectid != key.objectid ||
2809 found_key.type != key.type)
2812 di = btrfs_item_ptr(path->nodes[0], path->slots[0],
2813 struct btrfs_dir_item);
2814 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);
2816 dm = get_waiting_dir_move(sctx, loc.objectid);
2818 struct orphan_dir_info *odi;
2820 odi = add_orphan_dir_info(sctx, dir);
2826 dm->rmdir_ino = dir;
2831 if (loc.objectid > send_progress) {
2842 btrfs_free_path(path);
2846 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
2848 struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino);
2850 return entry != NULL;
2853 static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino)
2855 struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
2856 struct rb_node *parent = NULL;
2857 struct waiting_dir_move *entry, *dm;
2859 dm = kmalloc(sizeof(*dm), GFP_NOFS);
2867 entry = rb_entry(parent, struct waiting_dir_move, node);
2868 if (ino < entry->ino) {
2870 } else if (ino > entry->ino) {
2871 p = &(*p)->rb_right;
2878 rb_link_node(&dm->node, parent, p);
2879 rb_insert_color(&dm->node, &sctx->waiting_dir_moves);
2883 static struct waiting_dir_move *
2884 get_waiting_dir_move(struct send_ctx *sctx, u64 ino)
2886 struct rb_node *n = sctx->waiting_dir_moves.rb_node;
2887 struct waiting_dir_move *entry;
2890 entry = rb_entry(n, struct waiting_dir_move, node);
2891 if (ino < entry->ino)
2893 else if (ino > entry->ino)
2901 static void free_waiting_dir_move(struct send_ctx *sctx,
2902 struct waiting_dir_move *dm)
2906 rb_erase(&dm->node, &sctx->waiting_dir_moves);
2910 static int add_pending_dir_move(struct send_ctx *sctx, u64 parent_ino)
2912 struct rb_node **p = &sctx->pending_dir_moves.rb_node;
2913 struct rb_node *parent = NULL;
2914 struct pending_dir_move *entry, *pm;
2915 struct recorded_ref *cur;
2919 pm = kmalloc(sizeof(*pm), GFP_NOFS);
2922 pm->parent_ino = parent_ino;
2923 pm->ino = sctx->cur_ino;
2924 pm->gen = sctx->cur_inode_gen;
2925 INIT_LIST_HEAD(&pm->list);
2926 INIT_LIST_HEAD(&pm->update_refs);
2927 RB_CLEAR_NODE(&pm->node);
2931 entry = rb_entry(parent, struct pending_dir_move, node);
2932 if (parent_ino < entry->parent_ino) {
2934 } else if (parent_ino > entry->parent_ino) {
2935 p = &(*p)->rb_right;
2942 list_for_each_entry(cur, &sctx->deleted_refs, list) {
2943 ret = dup_ref(cur, &pm->update_refs);
2947 list_for_each_entry(cur, &sctx->new_refs, list) {
2948 ret = dup_ref(cur, &pm->update_refs);
2953 ret = add_waiting_dir_move(sctx, pm->ino);
2958 list_add_tail(&pm->list, &entry->list);
2960 rb_link_node(&pm->node, parent, p);
2961 rb_insert_color(&pm->node, &sctx->pending_dir_moves);
2966 __free_recorded_refs(&pm->update_refs);
2972 static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
2975 struct rb_node *n = sctx->pending_dir_moves.rb_node;
2976 struct pending_dir_move *entry;
2979 entry = rb_entry(n, struct pending_dir_move, node);
2980 if (parent_ino < entry->parent_ino)
2982 else if (parent_ino > entry->parent_ino)
2990 static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
2992 struct fs_path *from_path = NULL;
2993 struct fs_path *to_path = NULL;
2994 struct fs_path *name = NULL;
2995 u64 orig_progress = sctx->send_progress;
2996 struct recorded_ref *cur;
2997 u64 parent_ino, parent_gen;
2998 struct waiting_dir_move *dm = NULL;
3002 name = fs_path_alloc();
3003 from_path = fs_path_alloc();
3004 if (!name || !from_path) {
3009 dm = get_waiting_dir_move(sctx, pm->ino);
3011 rmdir_ino = dm->rmdir_ino;
3012 free_waiting_dir_move(sctx, dm);
3014 ret = get_first_ref(sctx->parent_root, pm->ino,
3015 &parent_ino, &parent_gen, name);
3019 if (parent_ino == sctx->cur_ino) {
3020 /* child only renamed, not moved */
3021 ASSERT(parent_gen == sctx->cur_inode_gen);
3022 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
3026 ret = fs_path_add_path(from_path, name);
3030 /* child moved and maybe renamed too */
3031 sctx->send_progress = pm->ino;
3032 ret = get_cur_path(sctx, pm->ino, pm->gen, from_path);
3040 to_path = fs_path_alloc();
3046 sctx->send_progress = sctx->cur_ino + 1;
3047 ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
3051 ret = send_rename(sctx, from_path, to_path);
3056 struct orphan_dir_info *odi;
3058 odi = get_orphan_dir_info(sctx, rmdir_ino);
3060 /* already deleted */
3063 ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino + 1);
3069 name = fs_path_alloc();
3074 ret = get_cur_path(sctx, rmdir_ino, odi->gen, name);
3077 ret = send_rmdir(sctx, name);
3080 free_orphan_dir_info(sctx, odi);
3084 ret = send_utimes(sctx, pm->ino, pm->gen);
3089 * After rename/move, need to update the utimes of both new parent(s)
3090 * and old parent(s).
3092 list_for_each_entry(cur, &pm->update_refs, list) {
3093 if (cur->dir == rmdir_ino)
3095 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
3102 fs_path_free(from_path);
3103 fs_path_free(to_path);
3104 sctx->send_progress = orig_progress;
3109 static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
3111 if (!list_empty(&m->list))
3113 if (!RB_EMPTY_NODE(&m->node))
3114 rb_erase(&m->node, &sctx->pending_dir_moves);
3115 __free_recorded_refs(&m->update_refs);
3119 static void tail_append_pending_moves(struct pending_dir_move *moves,
3120 struct list_head *stack)
3122 if (list_empty(&moves->list)) {
3123 list_add_tail(&moves->list, stack);
3126 list_splice_init(&moves->list, &list);
3127 list_add_tail(&moves->list, stack);
3128 list_splice_tail(&list, stack);
3132 static int apply_children_dir_moves(struct send_ctx *sctx)
3134 struct pending_dir_move *pm;
3135 struct list_head stack;
3136 u64 parent_ino = sctx->cur_ino;
3139 pm = get_pending_dir_moves(sctx, parent_ino);
3143 INIT_LIST_HEAD(&stack);
3144 tail_append_pending_moves(pm, &stack);
3146 while (!list_empty(&stack)) {
3147 pm = list_first_entry(&stack, struct pending_dir_move, list);
3148 parent_ino = pm->ino;
3149 ret = apply_dir_move(sctx, pm);
3150 free_pending_move(sctx, pm);
3153 pm = get_pending_dir_moves(sctx, parent_ino);
3155 tail_append_pending_moves(pm, &stack);
3160 while (!list_empty(&stack)) {
3161 pm = list_first_entry(&stack, struct pending_dir_move, list);
3162 free_pending_move(sctx, pm);
3167 static int wait_for_parent_move(struct send_ctx *sctx,
3168 struct recorded_ref *parent_ref)
3171 u64 ino = parent_ref->dir;
3172 u64 parent_ino_before, parent_ino_after;
3173 u64 new_gen, old_gen;
3174 struct fs_path *path_before = NULL;
3175 struct fs_path *path_after = NULL;
3178 if (parent_ref->dir <= sctx->cur_ino)
3181 if (is_waiting_for_move(sctx, ino))
3184 ret = get_inode_info(sctx->parent_root, ino, NULL, &old_gen,
3185 NULL, NULL, NULL, NULL);
3191 ret = get_inode_info(sctx->send_root, ino, NULL, &new_gen,
3192 NULL, NULL, NULL, NULL);
3196 if (new_gen != old_gen)
3199 path_before = fs_path_alloc();
3203 ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
3205 if (ret == -ENOENT) {
3208 } else if (ret < 0) {
3212 path_after = fs_path_alloc();
3218 ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
3220 if (ret == -ENOENT) {
3223 } else if (ret < 0) {
3227 len1 = fs_path_len(path_before);
3228 len2 = fs_path_len(path_after);
3229 if (parent_ino_before != parent_ino_after || len1 != len2 ||
3230 memcmp(path_before->start, path_after->start, len1)) {
3237 fs_path_free(path_before);
3238 fs_path_free(path_after);
3244 * This does all the move/link/unlink/rmdir magic.
3246 static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
3249 struct recorded_ref *cur;
3250 struct recorded_ref *cur2;
3251 struct list_head check_dirs;
3252 struct fs_path *valid_path = NULL;
3255 int did_overwrite = 0;
3257 u64 last_dir_ino_rm = 0;
3259 verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
3262 * This should never happen as the root dir always has the same ref
3263 * which is always '..'
3265 BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
3266 INIT_LIST_HEAD(&check_dirs);
3268 valid_path = fs_path_alloc();
3275 * First, check if the first ref of the current inode was overwritten
3276 * before. If yes, we know that the current inode was already orphanized
3277 * and thus use the orphan name. If not, we can use get_cur_path to
3278 * get the path of the first ref as it would like while receiving at
3279 * this point in time.
3280 * New inodes are always orphan at the beginning, so force to use the
3281 * orphan name in this case.
3282 * The first ref is stored in valid_path and will be updated if it
3283 * gets moved around.
3285 if (!sctx->cur_inode_new) {
3286 ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
3287 sctx->cur_inode_gen);
3293 if (sctx->cur_inode_new || did_overwrite) {
3294 ret = gen_unique_name(sctx, sctx->cur_ino,
3295 sctx->cur_inode_gen, valid_path);
3300 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
3306 list_for_each_entry(cur, &sctx->new_refs, list) {
3308 * We may have refs where the parent directory does not exist
3309 * yet. This happens if the parent directories inum is higher
3310 * the the current inum. To handle this case, we create the
3311 * parent directory out of order. But we need to check if this
3312 * did already happen before due to other refs in the same dir.
3314 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
3317 if (ret == inode_state_will_create) {
3320 * First check if any of the current inodes refs did
3321 * already create the dir.
3323 list_for_each_entry(cur2, &sctx->new_refs, list) {
3326 if (cur2->dir == cur->dir) {
3333 * If that did not happen, check if a previous inode
3334 * did already create the dir.
3337 ret = did_create_dir(sctx, cur->dir);
3341 ret = send_create_inode(sctx, cur->dir);
3348 * Check if this new ref would overwrite the first ref of
3349 * another unprocessed inode. If yes, orphanize the
3350 * overwritten inode. If we find an overwritten ref that is
3351 * not the first ref, simply unlink it.
3353 ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
3354 cur->name, cur->name_len,
3355 &ow_inode, &ow_gen);
3359 ret = is_first_ref(sctx->parent_root,
3360 ow_inode, cur->dir, cur->name,
3365 ret = orphanize_inode(sctx, ow_inode, ow_gen,
3370 ret = send_unlink(sctx, cur->full_path);
3377 * link/move the ref to the new place. If we have an orphan
3378 * inode, move it and update valid_path. If not, link or move
3379 * it depending on the inode mode.
3382 ret = send_rename(sctx, valid_path, cur->full_path);
3386 ret = fs_path_copy(valid_path, cur->full_path);
3390 if (S_ISDIR(sctx->cur_inode_mode)) {
3392 * Dirs can't be linked, so move it. For moved
3393 * dirs, we always have one new and one deleted
3394 * ref. The deleted ref is ignored later.
3396 ret = wait_for_parent_move(sctx, cur);
3400 ret = add_pending_dir_move(sctx,
3404 ret = send_rename(sctx, valid_path,
3407 ret = fs_path_copy(valid_path,
3413 ret = send_link(sctx, cur->full_path,
3419 ret = dup_ref(cur, &check_dirs);
3424 if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
3426 * Check if we can already rmdir the directory. If not,
3427 * orphanize it. For every dir item inside that gets deleted
3428 * later, we do this check again and rmdir it then if possible.
3429 * See the use of check_dirs for more details.
3431 ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen,
3436 ret = send_rmdir(sctx, valid_path);
3439 } else if (!is_orphan) {
3440 ret = orphanize_inode(sctx, sctx->cur_ino,
3441 sctx->cur_inode_gen, valid_path);
3447 list_for_each_entry(cur, &sctx->deleted_refs, list) {
3448 ret = dup_ref(cur, &check_dirs);
3452 } else if (S_ISDIR(sctx->cur_inode_mode) &&
3453 !list_empty(&sctx->deleted_refs)) {
3455 * We have a moved dir. Add the old parent to check_dirs
3457 cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
3459 ret = dup_ref(cur, &check_dirs);
3462 } else if (!S_ISDIR(sctx->cur_inode_mode)) {
3464 * We have a non dir inode. Go through all deleted refs and
3465 * unlink them if they were not already overwritten by other
3468 list_for_each_entry(cur, &sctx->deleted_refs, list) {
3469 ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
3470 sctx->cur_ino, sctx->cur_inode_gen,
3471 cur->name, cur->name_len);
3475 ret = send_unlink(sctx, cur->full_path);
3479 ret = dup_ref(cur, &check_dirs);
3484 * If the inode is still orphan, unlink the orphan. This may
3485 * happen when a previous inode did overwrite the first ref
3486 * of this inode and no new refs were added for the current
3487 * inode. Unlinking does not mean that the inode is deleted in
3488 * all cases. There may still be links to this inode in other
3492 ret = send_unlink(sctx, valid_path);
3499 * We did collect all parent dirs where cur_inode was once located. We
3500 * now go through all these dirs and check if they are pending for
3501 * deletion and if it's finally possible to perform the rmdir now.
3502 * We also update the inode stats of the parent dirs here.
3504 list_for_each_entry(cur, &check_dirs, list) {
3506 * In case we had refs into dirs that were not processed yet,
3507 * we don't need to do the utime and rmdir logic for these dirs.
3508 * The dir will be processed later.
3510 if (cur->dir > sctx->cur_ino)
3513 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
3517 if (ret == inode_state_did_create ||
3518 ret == inode_state_no_change) {
3519 /* TODO delayed utimes */
3520 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
3523 } else if (ret == inode_state_did_delete &&
3524 cur->dir != last_dir_ino_rm) {
3525 ret = can_rmdir(sctx, cur->dir, cur->dir_gen,
3530 ret = get_cur_path(sctx, cur->dir,
3531 cur->dir_gen, valid_path);
3534 ret = send_rmdir(sctx, valid_path);
3537 last_dir_ino_rm = cur->dir;
3545 __free_recorded_refs(&check_dirs);
3546 free_recorded_refs(sctx);
3547 fs_path_free(valid_path);
3551 static int __record_new_ref(int num, u64 dir, int index,
3552 struct fs_path *name,
3556 struct send_ctx *sctx = ctx;
3560 p = fs_path_alloc();
3564 ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL, NULL,
3569 ret = get_cur_path(sctx, dir, gen, p);
3572 ret = fs_path_add_path(p, name);
3576 ret = record_ref(&sctx->new_refs, dir, gen, p);
3584 static int __record_deleted_ref(int num, u64 dir, int index,
3585 struct fs_path *name,
3589 struct send_ctx *sctx = ctx;
3593 p = fs_path_alloc();
3597 ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL, NULL,
3602 ret = get_cur_path(sctx, dir, gen, p);
3605 ret = fs_path_add_path(p, name);
3609 ret = record_ref(&sctx->deleted_refs, dir, gen, p);
3617 static int record_new_ref(struct send_ctx *sctx)
3621 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
3622 sctx->cmp_key, 0, __record_new_ref, sctx);
3631 static int record_deleted_ref(struct send_ctx *sctx)
3635 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
3636 sctx->cmp_key, 0, __record_deleted_ref, sctx);
3645 struct find_ref_ctx {
3648 struct btrfs_root *root;
3649 struct fs_path *name;
3653 static int __find_iref(int num, u64 dir, int index,
3654 struct fs_path *name,
3657 struct find_ref_ctx *ctx = ctx_;
3661 if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) &&
3662 strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) {
3664 * To avoid doing extra lookups we'll only do this if everything
3667 ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL,
3671 if (dir_gen != ctx->dir_gen)
3673 ctx->found_idx = num;
3679 static int find_iref(struct btrfs_root *root,
3680 struct btrfs_path *path,
3681 struct btrfs_key *key,
3682 u64 dir, u64 dir_gen, struct fs_path *name)
3685 struct find_ref_ctx ctx;
3689 ctx.dir_gen = dir_gen;
3693 ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx);
3697 if (ctx.found_idx == -1)
3700 return ctx.found_idx;
3703 static int __record_changed_new_ref(int num, u64 dir, int index,
3704 struct fs_path *name,
3709 struct send_ctx *sctx = ctx;
3711 ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,
3716 ret = find_iref(sctx->parent_root, sctx->right_path,
3717 sctx->cmp_key, dir, dir_gen, name);
3719 ret = __record_new_ref(num, dir, index, name, sctx);
3726 static int __record_changed_deleted_ref(int num, u64 dir, int index,
3727 struct fs_path *name,
3732 struct send_ctx *sctx = ctx;
3734 ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,
3739 ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
3740 dir, dir_gen, name);
3742 ret = __record_deleted_ref(num, dir, index, name, sctx);
3749 static int record_changed_ref(struct send_ctx *sctx)
3753 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
3754 sctx->cmp_key, 0, __record_changed_new_ref, sctx);
3757 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
3758 sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
3768 * Record and process all refs at once. Needed when an inode changes the
3769 * generation number, which means that it was deleted and recreated.
3771 static int process_all_refs(struct send_ctx *sctx,
3772 enum btrfs_compare_tree_result cmd)
3775 struct btrfs_root *root;
3776 struct btrfs_path *path;
3777 struct btrfs_key key;
3778 struct btrfs_key found_key;
3779 struct extent_buffer *eb;
3781 iterate_inode_ref_t cb;
3782 int pending_move = 0;
3784 path = alloc_path_for_send();
3788 if (cmd == BTRFS_COMPARE_TREE_NEW) {
3789 root = sctx->send_root;
3790 cb = __record_new_ref;
3791 } else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
3792 root = sctx->parent_root;
3793 cb = __record_deleted_ref;
3795 btrfs_err(sctx->send_root->fs_info,
3796 "Wrong command %d in process_all_refs", cmd);
3801 key.objectid = sctx->cmp_key->objectid;
3802 key.type = BTRFS_INODE_REF_KEY;
3804 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3809 eb = path->nodes[0];
3810 slot = path->slots[0];
3811 if (slot >= btrfs_header_nritems(eb)) {
3812 ret = btrfs_next_leaf(root, path);
3820 btrfs_item_key_to_cpu(eb, &found_key, slot);
3822 if (found_key.objectid != key.objectid ||
3823 (found_key.type != BTRFS_INODE_REF_KEY &&
3824 found_key.type != BTRFS_INODE_EXTREF_KEY))
3827 ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
3833 btrfs_release_path(path);
3835 ret = process_recorded_refs(sctx, &pending_move);
3836 /* Only applicable to an incremental send. */
3837 ASSERT(pending_move == 0);
3840 btrfs_free_path(path);
3844 static int send_set_xattr(struct send_ctx *sctx,
3845 struct fs_path *path,
3846 const char *name, int name_len,
3847 const char *data, int data_len)
3851 ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
3855 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
3856 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
3857 TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);
3859 ret = send_cmd(sctx);
3866 static int send_remove_xattr(struct send_ctx *sctx,
3867 struct fs_path *path,
3868 const char *name, int name_len)
3872 ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
3876 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
3877 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
3879 ret = send_cmd(sctx);
3886 static int __process_new_xattr(int num, struct btrfs_key *di_key,
3887 const char *name, int name_len,
3888 const char *data, int data_len,
3892 struct send_ctx *sctx = ctx;
3894 posix_acl_xattr_header dummy_acl;
3896 p = fs_path_alloc();
3901 * This hack is needed because empty acl's are stored as zero byte
3902 * data in xattrs. Problem with that is, that receiving these zero byte
3903 * acl's will fail later. To fix this, we send a dummy acl list that
3904 * only contains the version number and no entries.
3906 if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
3907 !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
3908 if (data_len == 0) {
3909 dummy_acl.a_version =
3910 cpu_to_le32(POSIX_ACL_XATTR_VERSION);
3911 data = (char *)&dummy_acl;
3912 data_len = sizeof(dummy_acl);
3916 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
3920 ret = send_set_xattr(sctx, p, name, name_len, data, data_len);
3927 static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
3928 const char *name, int name_len,
3929 const char *data, int data_len,
3933 struct send_ctx *sctx = ctx;
3936 p = fs_path_alloc();
3940 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
3944 ret = send_remove_xattr(sctx, p, name, name_len);
3951 static int process_new_xattr(struct send_ctx *sctx)
3955 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
3956 sctx->cmp_key, __process_new_xattr, sctx);
3961 static int process_deleted_xattr(struct send_ctx *sctx)
3965 ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
3966 sctx->cmp_key, __process_deleted_xattr, sctx);
3971 struct find_xattr_ctx {
3979 static int __find_xattr(int num, struct btrfs_key *di_key,
3980 const char *name, int name_len,
3981 const char *data, int data_len,
3982 u8 type, void *vctx)
3984 struct find_xattr_ctx *ctx = vctx;
3986 if (name_len == ctx->name_len &&
3987 strncmp(name, ctx->name, name_len) == 0) {
3988 ctx->found_idx = num;
3989 ctx->found_data_len = data_len;
3990 ctx->found_data = kmemdup(data, data_len, GFP_NOFS);
3991 if (!ctx->found_data)
3998 static int find_xattr(struct btrfs_root *root,
3999 struct btrfs_path *path,
4000 struct btrfs_key *key,
4001 const char *name, int name_len,
4002 char **data, int *data_len)
4005 struct find_xattr_ctx ctx;
4008 ctx.name_len = name_len;
4010 ctx.found_data = NULL;
4011 ctx.found_data_len = 0;
4013 ret = iterate_dir_item(root, path, key, __find_xattr, &ctx);
4017 if (ctx.found_idx == -1)
4020 *data = ctx.found_data;
4021 *data_len = ctx.found_data_len;
4023 kfree(ctx.found_data);
4025 return ctx.found_idx;
4029 static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
4030 const char *name, int name_len,
4031 const char *data, int data_len,
4035 struct send_ctx *sctx = ctx;
4036 char *found_data = NULL;
4037 int found_data_len = 0;
4039 ret = find_xattr(sctx->parent_root, sctx->right_path,
4040 sctx->cmp_key, name, name_len, &found_data,
4042 if (ret == -ENOENT) {
4043 ret = __process_new_xattr(num, di_key, name, name_len, data,
4044 data_len, type, ctx);
4045 } else if (ret >= 0) {
4046 if (data_len != found_data_len ||
4047 memcmp(data, found_data, data_len)) {
4048 ret = __process_new_xattr(num, di_key, name, name_len,
4049 data, data_len, type, ctx);
4059 static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
4060 const char *name, int name_len,
4061 const char *data, int data_len,
4065 struct send_ctx *sctx = ctx;
4067 ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
4068 name, name_len, NULL, NULL);
4070 ret = __process_deleted_xattr(num, di_key, name, name_len, data,
4071 data_len, type, ctx);
4078 static int process_changed_xattr(struct send_ctx *sctx)
4082 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4083 sctx->cmp_key, __process_changed_new_xattr, sctx);
4086 ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
4087 sctx->cmp_key, __process_changed_deleted_xattr, sctx);
4093 static int process_all_new_xattrs(struct send_ctx *sctx)
4096 struct btrfs_root *root;
4097 struct btrfs_path *path;
4098 struct btrfs_key key;
4099 struct btrfs_key found_key;
4100 struct extent_buffer *eb;
4103 path = alloc_path_for_send();
4107 root = sctx->send_root;
4109 key.objectid = sctx->cmp_key->objectid;
4110 key.type = BTRFS_XATTR_ITEM_KEY;
4112 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4117 eb = path->nodes[0];
4118 slot = path->slots[0];
4119 if (slot >= btrfs_header_nritems(eb)) {
4120 ret = btrfs_next_leaf(root, path);
4123 } else if (ret > 0) {
4130 btrfs_item_key_to_cpu(eb, &found_key, slot);
4131 if (found_key.objectid != key.objectid ||
4132 found_key.type != key.type) {
4137 ret = iterate_dir_item(root, path, &found_key,
4138 __process_new_xattr, sctx);
4146 btrfs_free_path(path);
4150 static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
4152 struct btrfs_root *root = sctx->send_root;
4153 struct btrfs_fs_info *fs_info = root->fs_info;
4154 struct inode *inode;
4157 struct btrfs_key key;
4158 pgoff_t index = offset >> PAGE_CACHE_SHIFT;
4160 unsigned pg_offset = offset & ~PAGE_CACHE_MASK;
4163 key.objectid = sctx->cur_ino;
4164 key.type = BTRFS_INODE_ITEM_KEY;
4167 inode = btrfs_iget(fs_info->sb, &key, root, NULL);
4169 return PTR_ERR(inode);
4171 if (offset + len > i_size_read(inode)) {
4172 if (offset > i_size_read(inode))
4175 len = offset - i_size_read(inode);
4180 last_index = (offset + len - 1) >> PAGE_CACHE_SHIFT;
4181 while (index <= last_index) {
4182 unsigned cur_len = min_t(unsigned, len,
4183 PAGE_CACHE_SIZE - pg_offset);
4184 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
4190 if (!PageUptodate(page)) {
4191 btrfs_readpage(NULL, page);
4193 if (!PageUptodate(page)) {
4195 page_cache_release(page);
4202 memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
4205 page_cache_release(page);
4217 * Read some bytes from the current inode/file and send a write command to
4220 static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
4224 ssize_t num_read = 0;
4226 p = fs_path_alloc();
4230 verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len);
4232 num_read = fill_read_buf(sctx, offset, len);
4233 if (num_read <= 0) {
4239 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
4243 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4247 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4248 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4249 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read);
4251 ret = send_cmd(sctx);
4262 * Send a clone command to user space.
4264 static int send_clone(struct send_ctx *sctx,
4265 u64 offset, u32 len,
4266 struct clone_root *clone_root)
4272 verbose_printk("btrfs: send_clone offset=%llu, len=%d, clone_root=%llu, "
4273 "clone_inode=%llu, clone_offset=%llu\n", offset, len,
4274 clone_root->root->objectid, clone_root->ino,
4275 clone_root->offset);
4277 p = fs_path_alloc();
4281 ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
4285 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4289 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4290 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
4291 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4293 if (clone_root->root == sctx->send_root) {
4294 ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
4295 &gen, NULL, NULL, NULL, NULL);
4298 ret = get_cur_path(sctx, clone_root->ino, gen, p);
4300 ret = get_inode_path(clone_root->root, clone_root->ino, p);
4305 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
4306 clone_root->root->root_item.uuid);
4307 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
4308 le64_to_cpu(clone_root->root->root_item.ctransid));
4309 TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
4310 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
4311 clone_root->offset);
4313 ret = send_cmd(sctx);
4322 * Send an update extent command to user space.
4324 static int send_update_extent(struct send_ctx *sctx,
4325 u64 offset, u32 len)
4330 p = fs_path_alloc();
4334 ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
4338 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4342 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4343 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4344 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
4346 ret = send_cmd(sctx);
4354 static int send_hole(struct send_ctx *sctx, u64 end)
4356 struct fs_path *p = NULL;
4357 u64 offset = sctx->cur_inode_last_extent;
4361 p = fs_path_alloc();
4364 memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE);
4365 while (offset < end) {
4366 len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE);
4368 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
4371 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4374 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4375 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4376 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len);
4377 ret = send_cmd(sctx);
4387 static int send_write_or_clone(struct send_ctx *sctx,
4388 struct btrfs_path *path,
4389 struct btrfs_key *key,
4390 struct clone_root *clone_root)
4393 struct btrfs_file_extent_item *ei;
4394 u64 offset = key->offset;
4399 u64 bs = sctx->send_root->fs_info->sb->s_blocksize;
4401 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
4402 struct btrfs_file_extent_item);
4403 type = btrfs_file_extent_type(path->nodes[0], ei);
4404 if (type == BTRFS_FILE_EXTENT_INLINE) {
4405 len = btrfs_file_extent_inline_len(path->nodes[0],
4406 path->slots[0], ei);
4408 * it is possible the inline item won't cover the whole page,
4409 * but there may be items after this page. Make
4410 * sure to send the whole thing
4412 len = PAGE_CACHE_ALIGN(len);
4414 len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
4417 if (offset + len > sctx->cur_inode_size)
4418 len = sctx->cur_inode_size - offset;
4424 if (clone_root && IS_ALIGNED(offset + len, bs)) {
4425 ret = send_clone(sctx, offset, len, clone_root);
4426 } else if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA) {
4427 ret = send_update_extent(sctx, offset, len);
4431 if (l > BTRFS_SEND_READ_SIZE)
4432 l = BTRFS_SEND_READ_SIZE;
4433 ret = send_write(sctx, pos + offset, l);
4446 static int is_extent_unchanged(struct send_ctx *sctx,
4447 struct btrfs_path *left_path,
4448 struct btrfs_key *ekey)
4451 struct btrfs_key key;
4452 struct btrfs_path *path = NULL;
4453 struct extent_buffer *eb;
4455 struct btrfs_key found_key;
4456 struct btrfs_file_extent_item *ei;
4461 u64 left_offset_fixed;
4469 path = alloc_path_for_send();
4473 eb = left_path->nodes[0];
4474 slot = left_path->slots[0];
4475 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
4476 left_type = btrfs_file_extent_type(eb, ei);
4478 if (left_type != BTRFS_FILE_EXTENT_REG) {
4482 left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
4483 left_len = btrfs_file_extent_num_bytes(eb, ei);
4484 left_offset = btrfs_file_extent_offset(eb, ei);
4485 left_gen = btrfs_file_extent_generation(eb, ei);
4488 * Following comments will refer to these graphics. L is the left
4489 * extents which we are checking at the moment. 1-8 are the right
4490 * extents that we iterate.
4493 * |-1-|-2a-|-3-|-4-|-5-|-6-|
4496 * |--1--|-2b-|...(same as above)
4498 * Alternative situation. Happens on files where extents got split.
4500 * |-----------7-----------|-6-|
4502 * Alternative situation. Happens on files which got larger.
4505 * Nothing follows after 8.
4508 key.objectid = ekey->objectid;
4509 key.type = BTRFS_EXTENT_DATA_KEY;
4510 key.offset = ekey->offset;
4511 ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
4520 * Handle special case where the right side has no extents at all.
4522 eb = path->nodes[0];
4523 slot = path->slots[0];
4524 btrfs_item_key_to_cpu(eb, &found_key, slot);
4525 if (found_key.objectid != key.objectid ||
4526 found_key.type != key.type) {
4527 /* If we're a hole then just pretend nothing changed */
4528 ret = (left_disknr) ? 0 : 1;
4533 * We're now on 2a, 2b or 7.
4536 while (key.offset < ekey->offset + left_len) {
4537 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
4538 right_type = btrfs_file_extent_type(eb, ei);
4539 if (right_type != BTRFS_FILE_EXTENT_REG) {
4544 right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
4545 right_len = btrfs_file_extent_num_bytes(eb, ei);
4546 right_offset = btrfs_file_extent_offset(eb, ei);
4547 right_gen = btrfs_file_extent_generation(eb, ei);
4550 * Are we at extent 8? If yes, we know the extent is changed.
4551 * This may only happen on the first iteration.
4553 if (found_key.offset + right_len <= ekey->offset) {
4554 /* If we're a hole just pretend nothing changed */
4555 ret = (left_disknr) ? 0 : 1;
4559 left_offset_fixed = left_offset;
4560 if (key.offset < ekey->offset) {
4561 /* Fix the right offset for 2a and 7. */
4562 right_offset += ekey->offset - key.offset;
4564 /* Fix the left offset for all behind 2a and 2b */
4565 left_offset_fixed += key.offset - ekey->offset;
4569 * Check if we have the same extent.
4571 if (left_disknr != right_disknr ||
4572 left_offset_fixed != right_offset ||
4573 left_gen != right_gen) {
4579 * Go to the next extent.
4581 ret = btrfs_next_item(sctx->parent_root, path);
4585 eb = path->nodes[0];
4586 slot = path->slots[0];
4587 btrfs_item_key_to_cpu(eb, &found_key, slot);
4589 if (ret || found_key.objectid != key.objectid ||
4590 found_key.type != key.type) {
4591 key.offset += right_len;
4594 if (found_key.offset != key.offset + right_len) {
4602 * We're now behind the left extent (treat as unchanged) or at the end
4603 * of the right side (treat as changed).
4605 if (key.offset >= ekey->offset + left_len)
4612 btrfs_free_path(path);
4616 static int get_last_extent(struct send_ctx *sctx, u64 offset)
4618 struct btrfs_path *path;
4619 struct btrfs_root *root = sctx->send_root;
4620 struct btrfs_file_extent_item *fi;
4621 struct btrfs_key key;
4626 path = alloc_path_for_send();
4630 sctx->cur_inode_last_extent = 0;
4632 key.objectid = sctx->cur_ino;
4633 key.type = BTRFS_EXTENT_DATA_KEY;
4634 key.offset = offset;
4635 ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
4639 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
4640 if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
4643 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
4644 struct btrfs_file_extent_item);
4645 type = btrfs_file_extent_type(path->nodes[0], fi);
4646 if (type == BTRFS_FILE_EXTENT_INLINE) {
4647 u64 size = btrfs_file_extent_inline_len(path->nodes[0],
4648 path->slots[0], fi);
4649 extent_end = ALIGN(key.offset + size,
4650 sctx->send_root->sectorsize);
4652 extent_end = key.offset +
4653 btrfs_file_extent_num_bytes(path->nodes[0], fi);
4655 sctx->cur_inode_last_extent = extent_end;
4657 btrfs_free_path(path);
4661 static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
4662 struct btrfs_key *key)
4664 struct btrfs_file_extent_item *fi;
4669 if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
4672 if (sctx->cur_inode_last_extent == (u64)-1) {
4673 ret = get_last_extent(sctx, key->offset - 1);
4678 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
4679 struct btrfs_file_extent_item);
4680 type = btrfs_file_extent_type(path->nodes[0], fi);
4681 if (type == BTRFS_FILE_EXTENT_INLINE) {
4682 u64 size = btrfs_file_extent_inline_len(path->nodes[0],
4683 path->slots[0], fi);
4684 extent_end = ALIGN(key->offset + size,
4685 sctx->send_root->sectorsize);
4687 extent_end = key->offset +
4688 btrfs_file_extent_num_bytes(path->nodes[0], fi);
4691 if (path->slots[0] == 0 &&
4692 sctx->cur_inode_last_extent < key->offset) {
4694 * We might have skipped entire leafs that contained only
4695 * file extent items for our current inode. These leafs have
4696 * a generation number smaller (older) than the one in the
4697 * current leaf and the leaf our last extent came from, and
4698 * are located between these 2 leafs.
4700 ret = get_last_extent(sctx, key->offset - 1);
4705 if (sctx->cur_inode_last_extent < key->offset)
4706 ret = send_hole(sctx, key->offset);
4707 sctx->cur_inode_last_extent = extent_end;
4711 static int process_extent(struct send_ctx *sctx,
4712 struct btrfs_path *path,
4713 struct btrfs_key *key)
4715 struct clone_root *found_clone = NULL;
4718 if (S_ISLNK(sctx->cur_inode_mode))
4721 if (sctx->parent_root && !sctx->cur_inode_new) {
4722 ret = is_extent_unchanged(sctx, path, key);
4730 struct btrfs_file_extent_item *ei;
4733 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
4734 struct btrfs_file_extent_item);
4735 type = btrfs_file_extent_type(path->nodes[0], ei);
4736 if (type == BTRFS_FILE_EXTENT_PREALLOC ||
4737 type == BTRFS_FILE_EXTENT_REG) {
4739 * The send spec does not have a prealloc command yet,
4740 * so just leave a hole for prealloc'ed extents until
4741 * we have enough commands queued up to justify rev'ing
4744 if (type == BTRFS_FILE_EXTENT_PREALLOC) {
4749 /* Have a hole, just skip it. */
4750 if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
4757 ret = find_extent_clone(sctx, path, key->objectid, key->offset,
4758 sctx->cur_inode_size, &found_clone);
4759 if (ret != -ENOENT && ret < 0)
4762 ret = send_write_or_clone(sctx, path, key, found_clone);
4766 ret = maybe_send_hole(sctx, path, key);
4771 static int process_all_extents(struct send_ctx *sctx)
4774 struct btrfs_root *root;
4775 struct btrfs_path *path;
4776 struct btrfs_key key;
4777 struct btrfs_key found_key;
4778 struct extent_buffer *eb;
4781 root = sctx->send_root;
4782 path = alloc_path_for_send();
4786 key.objectid = sctx->cmp_key->objectid;
4787 key.type = BTRFS_EXTENT_DATA_KEY;
4789 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4794 eb = path->nodes[0];
4795 slot = path->slots[0];
4797 if (slot >= btrfs_header_nritems(eb)) {
4798 ret = btrfs_next_leaf(root, path);
4801 } else if (ret > 0) {
4808 btrfs_item_key_to_cpu(eb, &found_key, slot);
4810 if (found_key.objectid != key.objectid ||
4811 found_key.type != key.type) {
4816 ret = process_extent(sctx, path, &found_key);
4824 btrfs_free_path(path);
4828 static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
4830 int *refs_processed)
4834 if (sctx->cur_ino == 0)
4836 if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
4837 sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
4839 if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
4842 ret = process_recorded_refs(sctx, pending_move);
4846 *refs_processed = 1;
4851 static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
4862 int pending_move = 0;
4863 int refs_processed = 0;
4865 ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
4871 * We have processed the refs and thus need to advance send_progress.
4872 * Now, calls to get_cur_xxx will take the updated refs of the current
4873 * inode into account.
4875 * On the other hand, if our current inode is a directory and couldn't
4876 * be moved/renamed because its parent was renamed/moved too and it has
4877 * a higher inode number, we can only move/rename our current inode
4878 * after we moved/renamed its parent. Therefore in this case operate on
4879 * the old path (pre move/rename) of our current inode, and the
4880 * move/rename will be performed later.
4882 if (refs_processed && !pending_move)
4883 sctx->send_progress = sctx->cur_ino + 1;
4885 if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
4887 if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
4890 ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
4891 &left_mode, &left_uid, &left_gid, NULL);
4895 if (!sctx->parent_root || sctx->cur_inode_new) {
4897 if (!S_ISLNK(sctx->cur_inode_mode))
4900 ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
4901 NULL, NULL, &right_mode, &right_uid,
4906 if (left_uid != right_uid || left_gid != right_gid)
4908 if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
4912 if (S_ISREG(sctx->cur_inode_mode)) {
4913 if (need_send_hole(sctx)) {
4914 if (sctx->cur_inode_last_extent == (u64)-1) {
4915 ret = get_last_extent(sctx, (u64)-1);
4919 if (sctx->cur_inode_last_extent <
4920 sctx->cur_inode_size) {
4921 ret = send_hole(sctx, sctx->cur_inode_size);
4926 ret = send_truncate(sctx, sctx->cur_ino, sctx->cur_inode_gen,
4927 sctx->cur_inode_size);
4933 ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
4934 left_uid, left_gid);
4939 ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
4946 * If other directory inodes depended on our current directory
4947 * inode's move/rename, now do their move/rename operations.
4949 if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
4950 ret = apply_children_dir_moves(sctx);
4956 * Need to send that every time, no matter if it actually
4957 * changed between the two trees as we have done changes to
4960 sctx->send_progress = sctx->cur_ino + 1;
4961 ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
4969 static int changed_inode(struct send_ctx *sctx,
4970 enum btrfs_compare_tree_result result)
4973 struct btrfs_key *key = sctx->cmp_key;
4974 struct btrfs_inode_item *left_ii = NULL;
4975 struct btrfs_inode_item *right_ii = NULL;
4979 sctx->cur_ino = key->objectid;
4980 sctx->cur_inode_new_gen = 0;
4981 sctx->cur_inode_last_extent = (u64)-1;
4984 * Set send_progress to current inode. This will tell all get_cur_xxx
4985 * functions that the current inode's refs are not updated yet. Later,
4986 * when process_recorded_refs is finished, it is set to cur_ino + 1.
4988 sctx->send_progress = sctx->cur_ino;
4990 if (result == BTRFS_COMPARE_TREE_NEW ||
4991 result == BTRFS_COMPARE_TREE_CHANGED) {
4992 left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
4993 sctx->left_path->slots[0],
4994 struct btrfs_inode_item);
4995 left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
4998 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
4999 sctx->right_path->slots[0],
5000 struct btrfs_inode_item);
5001 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
5004 if (result == BTRFS_COMPARE_TREE_CHANGED) {
5005 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
5006 sctx->right_path->slots[0],
5007 struct btrfs_inode_item);
5009 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
5013 * The cur_ino = root dir case is special here. We can't treat
5014 * the inode as deleted+reused because it would generate a
5015 * stream that tries to delete/mkdir the root dir.
5017 if (left_gen != right_gen &&
5018 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
5019 sctx->cur_inode_new_gen = 1;
5022 if (result == BTRFS_COMPARE_TREE_NEW) {
5023 sctx->cur_inode_gen = left_gen;
5024 sctx->cur_inode_new = 1;
5025 sctx->cur_inode_deleted = 0;
5026 sctx->cur_inode_size = btrfs_inode_size(
5027 sctx->left_path->nodes[0], left_ii);
5028 sctx->cur_inode_mode = btrfs_inode_mode(
5029 sctx->left_path->nodes[0], left_ii);
5030 if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
5031 ret = send_create_inode_if_needed(sctx);
5032 } else if (result == BTRFS_COMPARE_TREE_DELETED) {
5033 sctx->cur_inode_gen = right_gen;
5034 sctx->cur_inode_new = 0;
5035 sctx->cur_inode_deleted = 1;
5036 sctx->cur_inode_size = btrfs_inode_size(
5037 sctx->right_path->nodes[0], right_ii);
5038 sctx->cur_inode_mode = btrfs_inode_mode(
5039 sctx->right_path->nodes[0], right_ii);
5040 } else if (result == BTRFS_COMPARE_TREE_CHANGED) {
5042 * We need to do some special handling in case the inode was
5043 * reported as changed with a changed generation number. This
5044 * means that the original inode was deleted and new inode
5045 * reused the same inum. So we have to treat the old inode as
5046 * deleted and the new one as new.
5048 if (sctx->cur_inode_new_gen) {
5050 * First, process the inode as if it was deleted.
5052 sctx->cur_inode_gen = right_gen;
5053 sctx->cur_inode_new = 0;
5054 sctx->cur_inode_deleted = 1;
5055 sctx->cur_inode_size = btrfs_inode_size(
5056 sctx->right_path->nodes[0], right_ii);
5057 sctx->cur_inode_mode = btrfs_inode_mode(
5058 sctx->right_path->nodes[0], right_ii);
5059 ret = process_all_refs(sctx,
5060 BTRFS_COMPARE_TREE_DELETED);
5065 * Now process the inode as if it was new.
5067 sctx->cur_inode_gen = left_gen;
5068 sctx->cur_inode_new = 1;
5069 sctx->cur_inode_deleted = 0;
5070 sctx->cur_inode_size = btrfs_inode_size(
5071 sctx->left_path->nodes[0], left_ii);
5072 sctx->cur_inode_mode = btrfs_inode_mode(
5073 sctx->left_path->nodes[0], left_ii);
5074 ret = send_create_inode_if_needed(sctx);
5078 ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
5082 * Advance send_progress now as we did not get into
5083 * process_recorded_refs_if_needed in the new_gen case.
5085 sctx->send_progress = sctx->cur_ino + 1;
5088 * Now process all extents and xattrs of the inode as if
5089 * they were all new.
5091 ret = process_all_extents(sctx);
5094 ret = process_all_new_xattrs(sctx);
5098 sctx->cur_inode_gen = left_gen;
5099 sctx->cur_inode_new = 0;
5100 sctx->cur_inode_new_gen = 0;
5101 sctx->cur_inode_deleted = 0;
5102 sctx->cur_inode_size = btrfs_inode_size(
5103 sctx->left_path->nodes[0], left_ii);
5104 sctx->cur_inode_mode = btrfs_inode_mode(
5105 sctx->left_path->nodes[0], left_ii);
5114 * We have to process new refs before deleted refs, but compare_trees gives us
5115 * the new and deleted refs mixed. To fix this, we record the new/deleted refs
5116 * first and later process them in process_recorded_refs.
5117 * For the cur_inode_new_gen case, we skip recording completely because
5118 * changed_inode did already initiate processing of refs. The reason for this is
5119 * that in this case, compare_tree actually compares the refs of 2 different
5120 * inodes. To fix this, process_all_refs is used in changed_inode to handle all
5121 * refs of the right tree as deleted and all refs of the left tree as new.
5123 static int changed_ref(struct send_ctx *sctx,
5124 enum btrfs_compare_tree_result result)
5128 BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
5130 if (!sctx->cur_inode_new_gen &&
5131 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
5132 if (result == BTRFS_COMPARE_TREE_NEW)
5133 ret = record_new_ref(sctx);
5134 else if (result == BTRFS_COMPARE_TREE_DELETED)
5135 ret = record_deleted_ref(sctx);
5136 else if (result == BTRFS_COMPARE_TREE_CHANGED)
5137 ret = record_changed_ref(sctx);
5144 * Process new/deleted/changed xattrs. We skip processing in the
5145 * cur_inode_new_gen case because changed_inode did already initiate processing
5146 * of xattrs. The reason is the same as in changed_ref
5148 static int changed_xattr(struct send_ctx *sctx,
5149 enum btrfs_compare_tree_result result)
5153 BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
5155 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
5156 if (result == BTRFS_COMPARE_TREE_NEW)
5157 ret = process_new_xattr(sctx);
5158 else if (result == BTRFS_COMPARE_TREE_DELETED)
5159 ret = process_deleted_xattr(sctx);
5160 else if (result == BTRFS_COMPARE_TREE_CHANGED)
5161 ret = process_changed_xattr(sctx);
5168 * Process new/deleted/changed extents. We skip processing in the
5169 * cur_inode_new_gen case because changed_inode did already initiate processing
5170 * of extents. The reason is the same as in changed_ref
5172 static int changed_extent(struct send_ctx *sctx,
5173 enum btrfs_compare_tree_result result)
5177 BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
5179 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
5180 if (result != BTRFS_COMPARE_TREE_DELETED)
5181 ret = process_extent(sctx, sctx->left_path,
5188 static int dir_changed(struct send_ctx *sctx, u64 dir)
5190 u64 orig_gen, new_gen;
5193 ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
5198 ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
5203 return (orig_gen != new_gen) ? 1 : 0;
5206 static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
5207 struct btrfs_key *key)
5209 struct btrfs_inode_extref *extref;
5210 struct extent_buffer *leaf;
5211 u64 dirid = 0, last_dirid = 0;
5218 /* Easy case, just check this one dirid */
5219 if (key->type == BTRFS_INODE_REF_KEY) {
5220 dirid = key->offset;
5222 ret = dir_changed(sctx, dirid);
5226 leaf = path->nodes[0];
5227 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
5228 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
5229 while (cur_offset < item_size) {
5230 extref = (struct btrfs_inode_extref *)(ptr +
5232 dirid = btrfs_inode_extref_parent(leaf, extref);
5233 ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
5234 cur_offset += ref_name_len + sizeof(*extref);
5235 if (dirid == last_dirid)
5237 ret = dir_changed(sctx, dirid);
5247 * Updates compare related fields in sctx and simply forwards to the actual
5248 * changed_xxx functions.
5250 static int changed_cb(struct btrfs_root *left_root,
5251 struct btrfs_root *right_root,
5252 struct btrfs_path *left_path,
5253 struct btrfs_path *right_path,
5254 struct btrfs_key *key,
5255 enum btrfs_compare_tree_result result,
5259 struct send_ctx *sctx = ctx;
5261 if (result == BTRFS_COMPARE_TREE_SAME) {
5262 if (key->type == BTRFS_INODE_REF_KEY ||
5263 key->type == BTRFS_INODE_EXTREF_KEY) {
5264 ret = compare_refs(sctx, left_path, key);
5269 } else if (key->type == BTRFS_EXTENT_DATA_KEY) {
5270 return maybe_send_hole(sctx, left_path, key);
5274 result = BTRFS_COMPARE_TREE_CHANGED;
5278 sctx->left_path = left_path;
5279 sctx->right_path = right_path;
5280 sctx->cmp_key = key;
5282 ret = finish_inode_if_needed(sctx, 0);
5286 /* Ignore non-FS objects */
5287 if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
5288 key->objectid == BTRFS_FREE_SPACE_OBJECTID)
5291 if (key->type == BTRFS_INODE_ITEM_KEY)
5292 ret = changed_inode(sctx, result);
5293 else if (key->type == BTRFS_INODE_REF_KEY ||
5294 key->type == BTRFS_INODE_EXTREF_KEY)
5295 ret = changed_ref(sctx, result);
5296 else if (key->type == BTRFS_XATTR_ITEM_KEY)
5297 ret = changed_xattr(sctx, result);
5298 else if (key->type == BTRFS_EXTENT_DATA_KEY)
5299 ret = changed_extent(sctx, result);
5305 static int full_send_tree(struct send_ctx *sctx)
5308 struct btrfs_trans_handle *trans = NULL;
5309 struct btrfs_root *send_root = sctx->send_root;
5310 struct btrfs_key key;
5311 struct btrfs_key found_key;
5312 struct btrfs_path *path;
5313 struct extent_buffer *eb;
5318 path = alloc_path_for_send();
5322 spin_lock(&send_root->root_item_lock);
5323 start_ctransid = btrfs_root_ctransid(&send_root->root_item);
5324 spin_unlock(&send_root->root_item_lock);
5326 key.objectid = BTRFS_FIRST_FREE_OBJECTID;
5327 key.type = BTRFS_INODE_ITEM_KEY;
5332 * We need to make sure the transaction does not get committed
5333 * while we do anything on commit roots. Join a transaction to prevent
5336 trans = btrfs_join_transaction(send_root);
5337 if (IS_ERR(trans)) {
5338 ret = PTR_ERR(trans);
5344 * Make sure the tree has not changed after re-joining. We detect this
5345 * by comparing start_ctransid and ctransid. They should always match.
5347 spin_lock(&send_root->root_item_lock);
5348 ctransid = btrfs_root_ctransid(&send_root->root_item);
5349 spin_unlock(&send_root->root_item_lock);
5351 if (ctransid != start_ctransid) {
5352 WARN(1, KERN_WARNING "BTRFS: the root that you're trying to "
5353 "send was modified in between. This is "
5354 "probably a bug.\n");
5359 ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
5367 * When someone want to commit while we iterate, end the
5368 * joined transaction and rejoin.
5370 if (btrfs_should_end_transaction(trans, send_root)) {
5371 ret = btrfs_end_transaction(trans, send_root);
5375 btrfs_release_path(path);
5379 eb = path->nodes[0];
5380 slot = path->slots[0];
5381 btrfs_item_key_to_cpu(eb, &found_key, slot);
5383 ret = changed_cb(send_root, NULL, path, NULL,
5384 &found_key, BTRFS_COMPARE_TREE_NEW, sctx);
5388 key.objectid = found_key.objectid;
5389 key.type = found_key.type;
5390 key.offset = found_key.offset + 1;
5392 ret = btrfs_next_item(send_root, path);
5402 ret = finish_inode_if_needed(sctx, 1);
5405 btrfs_free_path(path);
5408 ret = btrfs_end_transaction(trans, send_root);
5410 btrfs_end_transaction(trans, send_root);
5415 static int send_subvol(struct send_ctx *sctx)
5419 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
5420 ret = send_header(sctx);
5425 ret = send_subvol_begin(sctx);
5429 if (sctx->parent_root) {
5430 ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root,
5434 ret = finish_inode_if_needed(sctx, 1);
5438 ret = full_send_tree(sctx);
5444 free_recorded_refs(sctx);
5448 static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
5450 spin_lock(&root->root_item_lock);
5451 root->send_in_progress--;
5453 * Not much left to do, we don't know why it's unbalanced and
5454 * can't blindly reset it to 0.
5456 if (root->send_in_progress < 0)
5457 btrfs_err(root->fs_info,
5458 "send_in_progres unbalanced %d root %llu\n",
5459 root->send_in_progress, root->root_key.objectid);
5460 spin_unlock(&root->root_item_lock);
5463 long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
5466 struct btrfs_root *send_root;
5467 struct btrfs_root *clone_root;
5468 struct btrfs_fs_info *fs_info;
5469 struct btrfs_ioctl_send_args *arg = NULL;
5470 struct btrfs_key key;
5471 struct send_ctx *sctx = NULL;
5473 u64 *clone_sources_tmp = NULL;
5474 int clone_sources_to_rollback = 0;
5475 int sort_clone_roots = 0;
5478 if (!capable(CAP_SYS_ADMIN))
5481 send_root = BTRFS_I(file_inode(mnt_file))->root;
5482 fs_info = send_root->fs_info;
5485 * The subvolume must remain read-only during send, protect against
5488 spin_lock(&send_root->root_item_lock);
5489 send_root->send_in_progress++;
5490 spin_unlock(&send_root->root_item_lock);
5493 * This is done when we lookup the root, it should already be complete
5494 * by the time we get here.
5496 WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE);
5499 * Userspace tools do the checks and warn the user if it's
5502 if (!btrfs_root_readonly(send_root)) {
5507 arg = memdup_user(arg_, sizeof(*arg));
5514 if (!access_ok(VERIFY_READ, arg->clone_sources,
5515 sizeof(*arg->clone_sources) *
5516 arg->clone_sources_count)) {
5521 if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
5526 sctx = kzalloc(sizeof(struct send_ctx), GFP_NOFS);
5532 INIT_LIST_HEAD(&sctx->new_refs);
5533 INIT_LIST_HEAD(&sctx->deleted_refs);
5534 INIT_RADIX_TREE(&sctx->name_cache, GFP_NOFS);
5535 INIT_LIST_HEAD(&sctx->name_cache_list);
5537 sctx->flags = arg->flags;
5539 sctx->send_filp = fget(arg->send_fd);
5540 if (!sctx->send_filp) {
5545 sctx->send_root = send_root;
5546 sctx->clone_roots_cnt = arg->clone_sources_count;
5548 sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
5549 sctx->send_buf = vmalloc(sctx->send_max_size);
5550 if (!sctx->send_buf) {
5555 sctx->read_buf = vmalloc(BTRFS_SEND_READ_SIZE);
5556 if (!sctx->read_buf) {
5561 sctx->pending_dir_moves = RB_ROOT;
5562 sctx->waiting_dir_moves = RB_ROOT;
5563 sctx->orphan_dirs = RB_ROOT;
5565 sctx->clone_roots = vzalloc(sizeof(struct clone_root) *
5566 (arg->clone_sources_count + 1));
5567 if (!sctx->clone_roots) {
5572 if (arg->clone_sources_count) {
5573 clone_sources_tmp = vmalloc(arg->clone_sources_count *
5574 sizeof(*arg->clone_sources));
5575 if (!clone_sources_tmp) {
5580 ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
5581 arg->clone_sources_count *
5582 sizeof(*arg->clone_sources));
5588 for (i = 0; i < arg->clone_sources_count; i++) {
5589 key.objectid = clone_sources_tmp[i];
5590 key.type = BTRFS_ROOT_ITEM_KEY;
5591 key.offset = (u64)-1;
5593 index = srcu_read_lock(&fs_info->subvol_srcu);
5595 clone_root = btrfs_read_fs_root_no_name(fs_info, &key);
5596 if (IS_ERR(clone_root)) {
5597 srcu_read_unlock(&fs_info->subvol_srcu, index);
5598 ret = PTR_ERR(clone_root);
5601 clone_sources_to_rollback = i + 1;
5602 spin_lock(&clone_root->root_item_lock);
5603 clone_root->send_in_progress++;
5604 if (!btrfs_root_readonly(clone_root)) {
5605 spin_unlock(&clone_root->root_item_lock);
5606 srcu_read_unlock(&fs_info->subvol_srcu, index);
5610 spin_unlock(&clone_root->root_item_lock);
5611 srcu_read_unlock(&fs_info->subvol_srcu, index);
5613 sctx->clone_roots[i].root = clone_root;
5615 vfree(clone_sources_tmp);
5616 clone_sources_tmp = NULL;
5619 if (arg->parent_root) {
5620 key.objectid = arg->parent_root;
5621 key.type = BTRFS_ROOT_ITEM_KEY;
5622 key.offset = (u64)-1;
5624 index = srcu_read_lock(&fs_info->subvol_srcu);
5626 sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key);
5627 if (IS_ERR(sctx->parent_root)) {
5628 srcu_read_unlock(&fs_info->subvol_srcu, index);
5629 ret = PTR_ERR(sctx->parent_root);
5633 spin_lock(&sctx->parent_root->root_item_lock);
5634 sctx->parent_root->send_in_progress++;
5635 if (!btrfs_root_readonly(sctx->parent_root)) {
5636 spin_unlock(&sctx->parent_root->root_item_lock);
5637 srcu_read_unlock(&fs_info->subvol_srcu, index);
5641 spin_unlock(&sctx->parent_root->root_item_lock);
5643 srcu_read_unlock(&fs_info->subvol_srcu, index);
5647 * Clones from send_root are allowed, but only if the clone source
5648 * is behind the current send position. This is checked while searching
5649 * for possible clone sources.
5651 sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root;
5653 /* We do a bsearch later */
5654 sort(sctx->clone_roots, sctx->clone_roots_cnt,
5655 sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
5657 sort_clone_roots = 1;
5659 ret = send_subvol(sctx);
5663 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
5664 ret = begin_cmd(sctx, BTRFS_SEND_C_END);
5667 ret = send_cmd(sctx);
5673 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
5674 while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
5676 struct pending_dir_move *pm;
5678 n = rb_first(&sctx->pending_dir_moves);
5679 pm = rb_entry(n, struct pending_dir_move, node);
5680 while (!list_empty(&pm->list)) {
5681 struct pending_dir_move *pm2;
5683 pm2 = list_first_entry(&pm->list,
5684 struct pending_dir_move, list);
5685 free_pending_move(sctx, pm2);
5687 free_pending_move(sctx, pm);
5690 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
5691 while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
5693 struct waiting_dir_move *dm;
5695 n = rb_first(&sctx->waiting_dir_moves);
5696 dm = rb_entry(n, struct waiting_dir_move, node);
5697 rb_erase(&dm->node, &sctx->waiting_dir_moves);
5701 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
5702 while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
5704 struct orphan_dir_info *odi;
5706 n = rb_first(&sctx->orphan_dirs);
5707 odi = rb_entry(n, struct orphan_dir_info, node);
5708 free_orphan_dir_info(sctx, odi);
5711 if (sort_clone_roots) {
5712 for (i = 0; i < sctx->clone_roots_cnt; i++)
5713 btrfs_root_dec_send_in_progress(
5714 sctx->clone_roots[i].root);
5716 for (i = 0; sctx && i < clone_sources_to_rollback; i++)
5717 btrfs_root_dec_send_in_progress(
5718 sctx->clone_roots[i].root);
5720 btrfs_root_dec_send_in_progress(send_root);
5722 if (sctx && !IS_ERR_OR_NULL(sctx->parent_root))
5723 btrfs_root_dec_send_in_progress(sctx->parent_root);
5726 vfree(clone_sources_tmp);
5729 if (sctx->send_filp)
5730 fput(sctx->send_filp);
5732 vfree(sctx->clone_roots);
5733 vfree(sctx->send_buf);
5734 vfree(sctx->read_buf);
5736 name_cache_free(sctx);