/*
 * Copyright (C) 2012 Alexander Block.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/bsearch.h>
#include <linux/file.h>
#include <linux/sort.h>
#include <linux/mount.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/radix-tree.h>
#include <linux/vmalloc.h>
#include <linux/string.h>

#include "btrfs_inode.h"
#include "transaction.h"
#include "compression.h"
static int g_verbose = 0;
#define verbose_printk(...) do { if (g_verbose) printk(__VA_ARGS__); } while (0)
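/*
 * The do { } while (0) wrapper makes the macro expand to a single
 * statement, so it stays safe in unbraced if/else bodies, e.g.:
 *
 *	if (cond)
 *		verbose_printk("btrfs: ...\n");
 *	else
 *		do_something();
 */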
/*
 * An fs_path is a helper to dynamically build path names of unknown size.
 * It reallocates the internal buffer on demand.
 * It allows fast adding of path elements on the right side (normal path) and
 * fast adding on the left side (reversed path). A reversed path can also be
 * unreversed if needed.
 */
	unsigned short buf_len:15;
	unsigned short reversed:1;

	/*
	 * Average path length does not exceed 200 bytes; we'll get better
	 * packing in the slab and a higher chance of satisfying an
	 * allocation later during send.
	 */
#define FS_PATH_INLINE_SIZE \
	(sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
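/*
 * Illustration: buf_len is a 15-bit field, so an fs_path can describe at
 * most 2^15 - 1 bytes. Short paths live directly in inline_buf, i.e. in the
 * tail of struct fs_path itself, so a path of up to FS_PATH_INLINE_SIZE - 1
 * characters needs no separate allocation.
 */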
/* reused for each extent */
	struct btrfs_root *root;
#define SEND_CTX_MAX_NAME_CACHE_SIZE 128
#define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)
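/*
 * The 2x gap between these two limits gives the name cache hysteresis:
 * name_cache_clean_unused() below only starts evicting once the cache has
 * grown past 256 entries, and then trims it back down to 128, dropping the
 * least recently used entries first. This avoids evicting on every insert
 * once the cache is full.
 */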
	struct file *send_filp;

	u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
	u64 flags;	/* 'flags' member of btrfs_ioctl_send_args is u64 */

	struct btrfs_root *send_root;
	struct btrfs_root *parent_root;
	struct clone_root *clone_roots;

	/* current state of the compare_tree call */
	struct btrfs_path *left_path;
	struct btrfs_path *right_path;
	struct btrfs_key *cmp_key;

	/*
	 * Info about the currently processed inode. In case of deleted inodes,
	 * these are the values from the deleted inode.
	 */
	int cur_inode_new_gen;
	int cur_inode_deleted;
	u64 cur_inode_last_extent;

	struct list_head new_refs;
	struct list_head deleted_refs;

	struct radix_tree_root name_cache;
	struct list_head name_cache_list;

	struct file_ra_state ra;
	/*
	 * We process inodes in increasing order, so if before an
	 * incremental send we reverse the parent/child relationship of
	 * directories such that a directory with a lower inode number was
	 * the parent of a directory with a higher inode number, and the one
	 * becoming the new parent got renamed too, we can't rename/move the
	 * directory with the lower inode number when we finish processing
	 * it - we must process the directory with the higher inode number
	 * first, then rename/move it and then rename/move the directory
	 * with the lower inode number. Example follows.
	 *
	 * Tree state when the first send was performed:
	 *
	 * Tree state when the second (incremental) send is performed:
	 *
	 * The sequence of steps that led to the second state was:
	 *
	 * mv /a/b/c/d /a/b/c2/d2
	 * mv /a/b/c /a/b/c2/d2/cc
	 *
	 * "c" has the lower inode number, but we can't move it (the 2nd mv
	 * operation) before we move "d", which has the higher inode number.
	 *
	 * So we just memorize which move/rename operations must be performed
	 * later when their respective parent is processed and moved/renamed.
	 */
	/* Indexed by parent directory inode number. */
	struct rb_root pending_dir_moves;

	/*
	 * Reverse index, indexed by the inode number of a directory that
	 * is waiting for the move/rename of its immediate parent before its
	 * own move/rename can be performed.
	 */
	struct rb_root waiting_dir_moves;

	/*
	 * A directory that is going to be rm'ed might have a child directory
	 * which is in the pending directory moves index above. In this case,
	 * the directory can only be removed after the move/rename of its
	 * child is performed. Example:
	 *
	 * Sequence of steps that led to the send snapshot:
	 * rm -f /a/b/c/foo.txt
	 * mv /a/b/c/x /a/b/YY
	 *
	 * When the child is processed, its move/rename is delayed until its
	 * parent is processed (as explained above), but all other operations
	 * like update utimes, chown, chgrp, etc., are performed and the paths
	 * that it uses for those operations must use the orphanized name of
	 * its parent (the directory we're going to rm later), so we need to
	 * memorize that name.
	 *
	 * Indexed by the inode number of the directory to be deleted.
	 */
	struct rb_root orphan_dirs;
struct pending_dir_move {
	struct list_head list;
	struct list_head update_refs;
struct waiting_dir_move {
	/*
	 * There might be some directory that could not be removed because it
	 * was waiting for this directory inode to be moved first. Therefore
	 * after this directory is moved, we can try to rmdir the inode
	 * rmdir_ino.
	 */
struct orphan_dir_info {

struct name_cache_entry {
	struct list_head list;
	/*
	 * The radix tree has only 32bit entries, but we need to handle 64bit
	 * inums. We use the lower 32 bits of the 64bit inum to store it in
	 * the tree. If more than one inum would fall into the same entry, we
	 * use radix_list to store the additional entries. radix_list is also
	 * used to store entries where two entries have the same inum but
	 * different generations.
	 */
	struct list_head radix_list;
	int need_later_update;
static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);

static struct waiting_dir_move *
get_waiting_dir_move(struct send_ctx *sctx, u64 ino);

static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino);
static int need_send_hole(struct send_ctx *sctx)
	return (sctx->parent_root && !sctx->cur_inode_new &&
		!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
		S_ISREG(sctx->cur_inode_mode));
static void fs_path_reset(struct fs_path *p)
	p->start = p->buf + p->buf_len - 1;

static struct fs_path *fs_path_alloc(void)
	p = kmalloc(sizeof(*p), GFP_KERNEL);
	p->buf = p->inline_buf;
	p->buf_len = FS_PATH_INLINE_SIZE;

static struct fs_path *fs_path_alloc_reversed(void)

static void fs_path_free(struct fs_path *p)
	if (p->buf != p->inline_buf)

static int fs_path_len(struct fs_path *p)
	return p->end - p->start;
static int fs_path_ensure_buf(struct fs_path *p, int len)
	if (p->buf_len >= len)

	if (len > PATH_MAX) {

	path_len = p->end - p->start;
	old_buf_len = p->buf_len;

	/*
	 * First time the inline_buf does not suffice
	 */
	if (p->buf == p->inline_buf) {
		tmp_buf = kmalloc(len, GFP_KERNEL);
		memcpy(tmp_buf, p->buf, old_buf_len);
		tmp_buf = krealloc(p->buf, len, GFP_KERNEL);

	/*
	 * The real size of the buffer is bigger; this will let the fast
	 * path happen most of the time.
	 */
	p->buf_len = ksize(p->buf);
		tmp_buf = p->buf + old_buf_len - path_len - 1;
		p->end = p->buf + p->buf_len - 1;
		p->start = p->end - path_len;
		memmove(p->start, tmp_buf, path_len + 1);
	} else {
		p->end = p->start + path_len;
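/*
 * In the reversed case above, the path bytes live flush against the end of
 * the buffer (components get prepended), so after growing the buffer the
 * old contents, including the terminating NUL, have to be relocated to the
 * end of the new buffer - hence the memmove() of path_len + 1 bytes.
 */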
static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
				   char **prepared)
	new_len = p->end - p->start + name_len;
	if (p->start != p->end)
	ret = fs_path_ensure_buf(p, new_len);
	if (p->start != p->end)
		p->start -= name_len;
		*prepared = p->start;
	if (p->start != p->end)
static int fs_path_add(struct fs_path *p, const char *name, int name_len)
	ret = fs_path_prepare_for_add(p, name_len, &prepared);
	memcpy(prepared, name, name_len);

static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
	ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
	memcpy(prepared, p2->start, p2->end - p2->start);

static int fs_path_add_from_extent_buffer(struct fs_path *p,
					   struct extent_buffer *eb,
					   unsigned long off, int len)
	ret = fs_path_prepare_for_add(p, len, &prepared);
	read_extent_buffer(eb, prepared, off, len);

static int fs_path_copy(struct fs_path *p, struct fs_path *from)
	p->reversed = from->reversed;
	ret = fs_path_add_path(p, from);

static void fs_path_unreverse(struct fs_path *p)
	len = p->end - p->start;
	p->end = p->start + len;
	memmove(p->start, tmp, len + 1);
static struct btrfs_path *alloc_path_for_send(void)
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	path->search_commit_root = 1;
	path->skip_locking = 1;
	path->need_commit_sem = 1;
static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
		ret = vfs_write(filp, (__force const char __user *)buf + pos,
		/* TODO handle that correctly */
		/*if (ret == -ERESTARTSYS) {
static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
	struct btrfs_tlv_header *hdr;
	int total_len = sizeof(*hdr) + len;
	int left = sctx->send_max_size - sctx->send_size;

	if (unlikely(left < total_len))

	hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
	hdr->tlv_type = cpu_to_le16(attr);
	hdr->tlv_len = cpu_to_le16(len);
	memcpy(hdr + 1, data, len);
	sctx->send_size += total_len;
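/*
 * The resulting attribute layout in send_buf (integers little-endian):
 *
 *	+----------------+---------------+------------------+
 *	| tlv_type (u16) | tlv_len (u16) | data (len bytes) |
 *	+----------------+---------------+------------------+
 *
 * memcpy(hdr + 1, ...) places the payload immediately behind the header.
 */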
#define TLV_PUT_DEFINE_INT(bits) \
	static int tlv_put_u##bits(struct send_ctx *sctx,		\
				   u##bits attr, u##bits value)		\
	{								\
		__le##bits __tmp = cpu_to_le##bits(value);		\
		return tlv_put(sctx, attr, &__tmp, sizeof(__tmp));	\
	}

TLV_PUT_DEFINE_INT(64)
static int tlv_put_string(struct send_ctx *sctx, u16 attr,
			  const char *str, int len)
	return tlv_put(sctx, attr, str, len);

static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
			const u8 *uuid)
	return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);

static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
				  struct extent_buffer *eb,
				  struct btrfs_timespec *ts)
	struct btrfs_timespec bts;
	read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
	return tlv_put(sctx, attr, &bts, sizeof(bts));
#define TLV_PUT(sctx, attrtype, attrlen, data) \
	do { \
		ret = tlv_put(sctx, attrtype, attrlen, data); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_INT(sctx, attrtype, bits, value) \
	do { \
		ret = tlv_put_u##bits(sctx, attrtype, value); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
#define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
#define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
#define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
#define TLV_PUT_STRING(sctx, attrtype, str, len) \
	do { \
		ret = tlv_put_string(sctx, attrtype, str, len); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_PATH(sctx, attrtype, p) \
	do { \
		ret = tlv_put_string(sctx, attrtype, p->start, \
				     p->end - p->start); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_UUID(sctx, attrtype, uuid) \
	do { \
		ret = tlv_put_uuid(sctx, attrtype, uuid); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
	do { \
		ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
static int send_header(struct send_ctx *sctx)
	struct btrfs_stream_header hdr;

	strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
	hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);
	return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
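/*
 * A send stream thus starts with a fixed header (magic string plus stream
 * version) and is followed by a sequence of commands, each one built from a
 * btrfs_cmd_header (see begin_cmd()/send_cmd() below) and a series of TLV
 * encoded attributes.
 */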
/*
 * For each command/item we want to send to userspace, we call this function.
 */
static int begin_cmd(struct send_ctx *sctx, int cmd)
	struct btrfs_cmd_header *hdr;

	if (WARN_ON(!sctx->send_buf))

	BUG_ON(sctx->send_size);

	sctx->send_size += sizeof(*hdr);
	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->cmd = cpu_to_le16(cmd);
static int send_cmd(struct send_ctx *sctx)
	struct btrfs_cmd_header *hdr;

	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
	crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
	hdr->crc = cpu_to_le32(crc);

	ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
	sctx->total_send_size += sctx->send_size;
	sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size;
/*
 * Sends a move instruction to user space.
 */
static int send_rename(struct send_ctx *sctx,
		       struct fs_path *from, struct fs_path *to)
verbose_printk("btrfs: send_rename %s -> %s\n", from->start, to->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);

	ret = send_cmd(sctx);
/*
 * Sends a link instruction to user space.
 */
static int send_link(struct send_ctx *sctx,
		     struct fs_path *path, struct fs_path *lnk)
verbose_printk("btrfs: send_link %s -> %s\n", path->start, lnk->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);

	ret = send_cmd(sctx);
/*
 * Sends an unlink instruction to user space.
 */
static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
verbose_printk("btrfs: send_unlink %s\n", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);
/*
 * Sends an rmdir instruction to user space.
 */
static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
verbose_printk("btrfs: send_rmdir %s\n", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);
/*
 * Helper function to retrieve some fields from an inode item.
 */
static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path,
			    u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid,
			    u64 *gid, u64 *rdev)
	struct btrfs_inode_item *ii;
	struct btrfs_key key;

	key.type = BTRFS_INODE_ITEM_KEY;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

	ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_inode_item);
	if (size)
		*size = btrfs_inode_size(path->nodes[0], ii);
	if (gen)
		*gen = btrfs_inode_generation(path->nodes[0], ii);
	if (mode)
		*mode = btrfs_inode_mode(path->nodes[0], ii);
	if (uid)
		*uid = btrfs_inode_uid(path->nodes[0], ii);
	if (gid)
		*gid = btrfs_inode_gid(path->nodes[0], ii);
	if (rdev)
		*rdev = btrfs_inode_rdev(path->nodes[0], ii);
static int get_inode_info(struct btrfs_root *root,
			  u64 ino, u64 *size, u64 *gen,
			  u64 *mode, u64 *uid, u64 *gid,
			  u64 *rdev)
	struct btrfs_path *path;

	path = alloc_path_for_send();
	ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid,
			       rdev);
	btrfs_free_path(path);
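/*
 * Callers that need only some of the fields pass NULL for the rest, for
 * example get_cur_inode_state() below does:
 *
 *	ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen,
 *			     NULL, NULL, NULL, NULL);
 */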
typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
				   struct fs_path *p,
				   void *ctx);

/*
 * Helper function to iterate the entries in ONE btrfs_inode_ref or
 * btrfs_inode_extref.
 * The iterate callback may return a non-zero value to stop iteration. This
 * can be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the INODE_REF or INODE_EXTREF when called.
 */
static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
			     struct btrfs_key *found_key, int resolve,
			     iterate_inode_ref_t iterate, void *ctx)
	struct extent_buffer *eb = path->nodes[0];
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_inode_extref *extref;
	struct btrfs_path *tmp_path;
	int slot = path->slots[0];
	unsigned long name_off;
	unsigned long elem_size;

	p = fs_path_alloc_reversed();

	tmp_path = alloc_path_for_send();

	if (found_key->type == BTRFS_INODE_REF_KEY) {
		ptr = (unsigned long)btrfs_item_ptr(eb, slot,
						    struct btrfs_inode_ref);
		item = btrfs_item_nr(slot);
		total = btrfs_item_size(eb, item);
		elem_size = sizeof(*iref);
	} else {
		ptr = btrfs_item_ptr_offset(eb, slot);
		total = btrfs_item_size_nr(eb, slot);
		elem_size = sizeof(*extref);
	}
	while (cur < total) {
		if (found_key->type == BTRFS_INODE_REF_KEY) {
			iref = (struct btrfs_inode_ref *)(ptr + cur);
			name_len = btrfs_inode_ref_name_len(eb, iref);
			name_off = (unsigned long)(iref + 1);
			index = btrfs_inode_ref_index(eb, iref);
			dir = found_key->offset;
		} else {
			extref = (struct btrfs_inode_extref *)(ptr + cur);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			name_off = (unsigned long)&extref->name;
			index = btrfs_inode_extref_index(eb, extref);
			dir = btrfs_inode_extref_parent(eb, extref);
		}

		start = btrfs_ref_to_path(root, tmp_path, name_len,
				ret = PTR_ERR(start);
		if (start < p->buf) {
			/* overflow, try again with a larger buffer */
			ret = fs_path_ensure_buf(p,
						 p->buf_len + p->buf - start);
			start = btrfs_ref_to_path(root, tmp_path,
					ret = PTR_ERR(start);
			BUG_ON(start < p->buf);
		}

		ret = fs_path_add_from_extent_buffer(p, eb, name_off,
						     name_len);

		cur += elem_size + name_len;
		ret = iterate(num, dir, index, p, ctx);

	btrfs_free_path(tmp_path);
typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
				  const char *name, int name_len,
				  const char *data, int data_len,
				  u8 type, void *ctx);

/*
 * Helper function to iterate the entries in ONE btrfs_dir_item.
 * The iterate callback may return a non-zero value to stop iteration. This
 * can be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the dir item when called.
 */
static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
			    struct btrfs_key *found_key,
			    iterate_dir_item_t iterate, void *ctx)
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_dir_item *di;
	struct btrfs_key di_key;

	/*
	 * Start with a small buffer (1 page). If later we end up needing more
	 * space, which can happen for xattrs on a fs with a leaf size greater
	 * than the page size, attempt to increase the buffer. Typically xattr
	 * values are small.
	 */
	buf = kmalloc(buf_len, GFP_KERNEL);
	eb = path->nodes[0];
	slot = path->slots[0];
	item = btrfs_item_nr(slot);
	di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
	total = btrfs_item_size(eb, item);

	while (cur < total) {
		name_len = btrfs_dir_name_len(eb, di);
		data_len = btrfs_dir_data_len(eb, di);
		type = btrfs_dir_type(eb, di);
		btrfs_dir_item_key_to_cpu(eb, di, &di_key);

		if (type == BTRFS_FT_XATTR) {
			if (name_len > XATTR_NAME_MAX) {
				ret = -ENAMETOOLONG;
			if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(root)) {
			if (name_len + data_len > PATH_MAX) {
				ret = -ENAMETOOLONG;

		if (name_len + data_len > buf_len) {
			buf_len = name_len + data_len;
			if (is_vmalloc_addr(buf)) {
				char *tmp = krealloc(buf, buf_len,
						     GFP_KERNEL | __GFP_NOWARN);
				buf = vmalloc(buf_len);

		read_extent_buffer(eb, buf, (unsigned long)(di + 1),
				   name_len + data_len);

		len = sizeof(*di) + name_len + data_len;
		di = (struct btrfs_dir_item *)((char *)di + len);

		ret = iterate(num, &di_key, buf, name_len, buf + name_len,
			      data_len, type, ctx);
static int __copy_first_ref(int num, u64 dir, int index,
			    struct fs_path *p, void *ctx)
	struct fs_path *pt = ctx;

	ret = fs_path_copy(pt, p);

	/* we want the first only */

/*
 * Retrieve the first path of an inode. If an inode has more than one
 * ref/hardlink, the others are ignored.
 */
static int get_inode_path(struct btrfs_root *root,
			  u64 ino, struct fs_path *path)
	struct btrfs_key key, found_key;
	struct btrfs_path *p;

	p = alloc_path_for_send();

	fs_path_reset(path);

	key.type = BTRFS_INODE_REF_KEY;

	ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);

	btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
	if (found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {

	ret = iterate_inode_ref(root, p, &found_key, 1,
				__copy_first_ref, path);
struct backref_ctx {
	struct send_ctx *sctx;

	struct btrfs_path *path;

	/* number of total found references */

	/*
	 * Used for clones found in send_root. Clones found behind
	 * cur_objectid and cur_offset are not considered allowed clones.
	 */

	/* may be truncated in case it's the last extent in a file */

	/* data offset in the file extent item */

	/* Just to check for bugs in backref resolving */
static int __clone_root_cmp_bsearch(const void *key, const void *elt)
	u64 root = (u64)(uintptr_t)key;
	struct clone_root *cr = (struct clone_root *)elt;

	if (root < cr->root->objectid)
	if (root > cr->root->objectid)

static int __clone_root_cmp_sort(const void *e1, const void *e2)
	struct clone_root *cr1 = (struct clone_root *)e1;
	struct clone_root *cr2 = (struct clone_root *)e2;

	if (cr1->root->objectid < cr2->root->objectid)
	if (cr1->root->objectid > cr2->root->objectid)
/*
 * Called for every backref that is found for the current extent.
 * Results are collected in sctx->clone_roots->ino/offset/found_refs.
 */
static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
	struct backref_ctx *bctx = ctx_;
	struct clone_root *found;

	/* First check if the root is in the list of accepted clone sources */
	found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
			bctx->sctx->clone_roots_cnt,
			sizeof(struct clone_root),
			__clone_root_cmp_bsearch);

	if (found->root == bctx->sctx->send_root &&
	    ino == bctx->cur_objectid &&
	    offset == bctx->cur_offset) {
		bctx->found_itself = 1;
	/*
	 * There are inodes that have extents that lie behind their i_size.
	 * Don't accept clones from these extents.
	 */
	ret = __get_inode_info(found->root, bctx->path, ino, &i_size, NULL, NULL,
	btrfs_release_path(bctx->path);

	if (offset + bctx->data_offset + bctx->extent_len > i_size)

	/*
	 * Make sure we don't consider clones from send_root that are
	 * behind the current inode/offset.
	 */
	if (found->root == bctx->sctx->send_root) {
		/*
		 * TODO for the moment we don't accept clones from the inode
		 * that is currently being sent. We may change this when
		 * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same
		 * file.
		 */
		if (ino >= bctx->cur_objectid)
		if (ino > bctx->cur_objectid)
		if (offset + bctx->extent_len > bctx->cur_offset)

	found->found_refs++;
	if (ino < found->ino) {
		found->offset = offset;
	} else if (found->ino == ino) {
		/*
		 * Same extent found more than once in the same file.
		 */
		if (found->offset > offset + bctx->extent_len)
			found->offset = offset;
/*
 * Given an inode, offset and extent item, it finds a good clone for a clone
 * instruction. Returns -ENOENT when none could be found. The function makes
 * sure that the returned clone is usable at the point where sending is at
 * the moment. This means that no clones are accepted which lie behind the
 * current inode+offset.
 *
 * path must point to the extent item when called.
 */
static int find_extent_clone(struct send_ctx *sctx,
			     struct btrfs_path *path,
			     u64 ino, u64 data_offset,
			     u64 ino_size,
			     struct clone_root **found)
	u64 extent_item_pos;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *eb = path->nodes[0];
	struct backref_ctx *backref_ctx = NULL;
	struct clone_root *cur_clone_root;
	struct btrfs_key found_key;
	struct btrfs_path *tmp_path;

	tmp_path = alloc_path_for_send();

	/* We only use this path under the commit sem */
	tmp_path->need_commit_sem = 0;

	backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_KERNEL);

	backref_ctx->path = tmp_path;

	if (data_offset >= ino_size) {
		/*
		 * There may be extents that lie behind the file's size.
		 * I at least had this in combination with snapshotting while
		 * writing large files.
		 */

	fi = btrfs_item_ptr(eb, path->slots[0],
			    struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(eb, fi);
	if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
	compressed = btrfs_file_extent_compression(eb, fi);

	num_bytes = btrfs_file_extent_num_bytes(eb, fi);
	disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
	if (disk_byte == 0) {

	logical = disk_byte + btrfs_file_extent_offset(eb, fi);

	down_read(&sctx->send_root->fs_info->commit_root_sem);
	ret = extent_from_logical(sctx->send_root->fs_info, disk_byte, tmp_path,
				  &found_key, &flags);
	up_read(&sctx->send_root->fs_info->commit_root_sem);
	btrfs_release_path(tmp_path);
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {

	/*
	 * Setup the clone roots.
	 */
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		cur_clone_root = sctx->clone_roots + i;
		cur_clone_root->ino = (u64)-1;
		cur_clone_root->offset = 0;
		cur_clone_root->found_refs = 0;
	}

	backref_ctx->sctx = sctx;
	backref_ctx->found = 0;
	backref_ctx->cur_objectid = ino;
	backref_ctx->cur_offset = data_offset;
	backref_ctx->found_itself = 0;
	backref_ctx->extent_len = num_bytes;

	/*
	 * For non-compressed extents iterate_extent_inodes() gives us extent
	 * offsets that already take into account the data offset, but not for
	 * compressed extents, since the offset is logical and not relative to
	 * the physical extent locations. We must take this into account to
	 * avoid sending clone offsets that go beyond the source file's size,
	 * which would result in the clone ioctl failing with -EINVAL on the
	 * receiving end.
	 */
	if (compressed == BTRFS_COMPRESS_NONE)
		backref_ctx->data_offset = 0;
	else
		backref_ctx->data_offset = btrfs_file_extent_offset(eb, fi);

	/*
	 * The last extent of a file may be too large due to page alignment.
	 * We need to adjust extent_len in this case so that the checks in
	 * __iterate_backrefs work.
	 */
	if (data_offset + num_bytes >= ino_size)
		backref_ctx->extent_len = ino_size - data_offset;
	/*
	 * Now collect all backrefs.
	 */
	if (compressed == BTRFS_COMPRESS_NONE)
		extent_item_pos = logical - found_key.objectid;
	else
		extent_item_pos = 0;
	ret = iterate_extent_inodes(sctx->send_root->fs_info,
				    found_key.objectid, extent_item_pos, 1,
				    __iterate_backrefs, backref_ctx);

	if (!backref_ctx->found_itself) {
		/* found a bug in backref code? */
		btrfs_err(sctx->send_root->fs_info, "did not find backref in "
			  "send_root. inode=%llu, offset=%llu, "
			  "disk_byte=%llu found extent=%llu",
			  ino, data_offset, disk_byte, found_key.objectid);
	}

verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, "
	       "ino=%llu, num_bytes=%llu, logical=%llu\n",
	       data_offset, ino, num_bytes, logical);

	if (!backref_ctx->found)
		verbose_printk("btrfs: no clones found\n");

	cur_clone_root = NULL;
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		if (sctx->clone_roots[i].found_refs) {
			if (!cur_clone_root)
				cur_clone_root = sctx->clone_roots + i;
			else if (sctx->clone_roots[i].root == sctx->send_root)
				/* prefer clones from send_root over others */
				cur_clone_root = sctx->clone_roots + i;
		}
	}

	if (cur_clone_root) {
		*found = cur_clone_root;

	btrfs_free_path(tmp_path);
static int read_symlink(struct btrfs_root *root,
			u64 ino,
			struct fs_path *dest)
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item *ei;

	path = alloc_path_for_send();

	key.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

		/*
		 * An empty symlink inode. Can happen in rare error paths when
		 * creating a symlink (transaction committed before the inode
		 * eviction handler removed the symlink inode items and a
		 * crash happened in between or the subvol was snapshotted in
		 * between). Print an informative message to dmesg/syslog so
		 * that the user can delete the symlink.
		 */
		btrfs_err(root->fs_info,
			  "Found empty symlink inode %llu at root %llu",
			  ino, root->root_key.objectid);

	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], ei);
	compression = btrfs_file_extent_compression(path->nodes[0], ei);
	BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
	BUG_ON(compression);

	off = btrfs_file_extent_inline_start(ei);
	len = btrfs_file_extent_inline_len(path->nodes[0], path->slots[0], ei);

	ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);

	btrfs_free_path(path);
/*
 * Helper function to generate a file name that is unique in the root of
 * send_root and parent_root. This is used to generate names for orphan
 * inodes.
 */
static int gen_unique_name(struct send_ctx *sctx,
			   u64 ino, u64 gen,
			   struct fs_path *dest)
	struct btrfs_path *path;
	struct btrfs_dir_item *di;

	path = alloc_path_for_send();

	len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
		       ino, gen, idx);
	ASSERT(len < sizeof(tmp));

	di = btrfs_lookup_dir_item(NULL, sctx->send_root,
				   path, BTRFS_FIRST_FREE_OBJECTID,
				   tmp, strlen(tmp), 0);
	btrfs_release_path(path);

	/* not unique, try again */

	if (!sctx->parent_root) {

	di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
				   path, BTRFS_FIRST_FREE_OBJECTID,
				   tmp, strlen(tmp), 0);
	btrfs_release_path(path);

	/* not unique, try again */

	ret = fs_path_add(dest, tmp, strlen(tmp));

	btrfs_free_path(path);
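/*
 * Example of the names generated above: for inode 261 with generation 5
 * this produces "o261-5-0", and if that name already exists in the root of
 * send_root or parent_root, the last component (the retry index) is
 * incremented until the name is unique.
 */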
	inode_state_no_change,
	inode_state_will_create,
	inode_state_did_create,
	inode_state_will_delete,
	inode_state_did_delete,

static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
	ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
			     NULL, NULL);
	if (ret < 0 && ret != -ENOENT)

	if (!sctx->parent_root) {
		right_ret = -ENOENT;
	} else {
		ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
				     NULL, NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
	}

	if (!left_ret && !right_ret) {
		if (left_gen == gen && right_gen == gen) {
			ret = inode_state_no_change;
		} else if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		}
	} else if (!left_ret) {
		if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		}
	} else if (!right_ret) {
		if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		}
	}
static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
	ret = get_cur_inode_state(sctx, ino, gen);

	if (ret == inode_state_no_change ||
	    ret == inode_state_did_create ||
	    ret == inode_state_will_delete)
/*
 * Helper function to look up a dir item in a dir.
 */
static int lookup_dir_item_inode(struct btrfs_root *root,
				 u64 dir, const char *name, int name_len,
				 u64 *found_inode, u8 *found_type)
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = alloc_path_for_send();

	di = btrfs_lookup_dir_item(NULL, root, path,
				   dir, name, name_len, 0);

	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
	if (key.type == BTRFS_ROOT_ITEM_KEY) {

	*found_inode = key.objectid;
	*found_type = btrfs_dir_type(path->nodes[0], di);

	btrfs_free_path(path);
/*
 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent
 * dir, the generation of the parent dir and the name of the dir entry.
 */
static int get_first_ref(struct btrfs_root *root, u64 ino,
			 u64 *dir, u64 *dir_gen, struct fs_path *name)
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = alloc_path_for_send();

	key.type = BTRFS_INODE_REF_KEY;

	ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);

	btrfs_item_key_to_cpu(path->nodes[0], &found_key,
			      path->slots[0]);
	if (ret || found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {

	if (found_key.type == BTRFS_INODE_REF_KEY) {
		struct btrfs_inode_ref *iref;
		iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_inode_ref);
		len = btrfs_inode_ref_name_len(path->nodes[0], iref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
						     (unsigned long)(iref + 1),
						     len);
		parent_dir = found_key.offset;
	} else {
		struct btrfs_inode_extref *extref;
		extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
					struct btrfs_inode_extref);
		len = btrfs_inode_extref_name_len(path->nodes[0], extref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
				(unsigned long)&extref->name, len);
		parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
	}

	btrfs_release_path(path);

	ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL,
			     NULL, NULL, NULL);

	btrfs_free_path(path);
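/*
 * For example, for an inode reachable as /a/b/c, get_first_ref() returns
 * the inode number of directory "b" in *dir, b's generation in *dir_gen,
 * and appends the entry name "c" to *name.
 */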
static int is_first_ref(struct btrfs_root *root,
			u64 ino, u64 dir,
			const char *name, int name_len)
	struct fs_path *tmp_name;

	tmp_name = fs_path_alloc();

	ret = get_first_ref(root, ino, &tmp_dir, NULL, tmp_name);

	if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {

	ret = !memcmp(tmp_name->start, name, name_len);

	fs_path_free(tmp_name);
/*
 * Used by process_recorded_refs to determine if a new ref would overwrite an
 * already existing ref. In case it detects an overwrite, it returns the
 * inode/gen in who_ino/who_gen.
 * When an overwrite is detected, process_recorded_refs does proper
 * orphanizing to make sure later references to the overwritten inode are
 * possible.
 * Orphanizing is however only required for the first ref of an inode.
 * process_recorded_refs does an additional is_first_ref check to see if
 * orphanizing is really required.
 */
static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
			      const char *name, int name_len,
			      u64 *who_ino, u64 *who_gen)
	u64 other_inode = 0;

	if (!sctx->parent_root)

	ret = is_inode_existent(sctx, dir, dir_gen);

	/*
	 * If we have a parent root we need to verify that the parent dir was
	 * not deleted and then re-created; if it was then we have no
	 * overwrite and we can just unlink this entry.
	 */
	if (sctx->parent_root) {
		ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
				     NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)

	ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
				    &other_inode, &other_type);
	if (ret < 0 && ret != -ENOENT)

	/*
	 * Check if the overwritten ref was already processed. If yes, the ref
	 * was already unlinked/moved, so we can safely assume that we will
	 * not overwrite anything at this point in time.
	 */
	if (other_inode > sctx->send_progress ||
	    is_waiting_for_move(sctx, other_inode)) {
		ret = get_inode_info(sctx->parent_root, other_inode, NULL,
				     who_gen, NULL, NULL, NULL, NULL);

		*who_ino = other_inode;
/*
 * Checks if the ref was overwritten by an already processed inode. This is
 * used by __get_cur_name_and_parent to find out if the ref was orphanized
 * and thus the orphan name needs to be used.
 * process_recorded_refs also uses it to avoid unlinking of refs that were
 * overwritten.
 */
static int did_overwrite_ref(struct send_ctx *sctx,
			     u64 dir, u64 dir_gen,
			     u64 ino, u64 ino_gen,
			     const char *name, int name_len)
	if (!sctx->parent_root)

	ret = is_inode_existent(sctx, dir, dir_gen);

	/* check if the ref was overwritten by another ref */
	ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
				    &ow_inode, &other_type);
	if (ret < 0 && ret != -ENOENT)

	/* was never and will never be overwritten */

	ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
			     NULL, NULL);

	if (ow_inode == ino && gen == ino_gen) {

	/*
	 * We know that it is or will be overwritten. Check this now.
	 * The current inode being processed might have been the one that
	 * caused inode 'ino' to be orphanized, therefore check if ow_inode
	 * matches the current inode being processed.
	 */
	if ((ow_inode < sctx->send_progress) ||
	    (ino != sctx->cur_ino && ow_inode == sctx->cur_ino &&
	     gen == sctx->cur_inode_gen))
/*
 * Same as did_overwrite_ref, but also checks if it is the first ref of an
 * inode that got overwritten. This is used by process_recorded_refs to
 * determine if it has to use the path as returned by get_cur_path or the
 * orphan name.
 */
static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
	struct fs_path *name = NULL;

	if (!sctx->parent_root)

	name = fs_path_alloc();

	ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);

	ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
				name->start, fs_path_len(name));
/*
 * Insert a name cache entry. On 32bit kernels the radix tree index is
 * 32bit, so we need to do some special handling in case we have clashes.
 * This function takes care of this with the help of
 * name_cache_entry::radix_list.
 * In case of error, nce is kfreed.
 */
static int name_cache_insert(struct send_ctx *sctx,
			     struct name_cache_entry *nce)
	struct list_head *nce_head;

	nce_head = radix_tree_lookup(&sctx->name_cache,
				     (unsigned long)nce->ino);
	if (!nce_head) {
		nce_head = kmalloc(sizeof(*nce_head), GFP_KERNEL);
		INIT_LIST_HEAD(nce_head);

		ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
	}
	list_add_tail(&nce->radix_list, nce_head);
	list_add_tail(&nce->list, &sctx->name_cache_list);
	sctx->name_cache_size++;
static void name_cache_delete(struct send_ctx *sctx,
			      struct name_cache_entry *nce)
	struct list_head *nce_head;

	nce_head = radix_tree_lookup(&sctx->name_cache,
				     (unsigned long)nce->ino);
	if (!nce_head) {
		btrfs_err(sctx->send_root->fs_info,
			  "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
			  nce->ino, sctx->name_cache_size);
	}

	list_del(&nce->radix_list);
	list_del(&nce->list);
	sctx->name_cache_size--;

	/*
	 * We may not get to the final release of nce_head if the lookup fails
	 */
	if (nce_head && list_empty(nce_head)) {
		radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
						  u64 ino, u64 gen)
	struct list_head *nce_head;
	struct name_cache_entry *cur;

	nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);

	list_for_each_entry(cur, nce_head, radix_list) {
		if (cur->ino == ino && cur->gen == gen)

/*
 * Removes the entry from the list and adds it back to the end. This marks
 * the entry as recently used so that name_cache_clean_unused does not
 * remove it.
 */
static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce)
	list_del(&nce->list);
	list_add_tail(&nce->list, &sctx->name_cache_list);
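/*
 * name_cache_list is thus kept in LRU order: the least recently used entry
 * sits at the head, and every cache hit moves its entry back to the tail,
 * so name_cache_clean_unused() below can simply evict from the head.
 */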
/*
 * Remove some entries from the beginning of name_cache_list.
 */
static void name_cache_clean_unused(struct send_ctx *sctx)
	struct name_cache_entry *nce;

	if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)

	while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
		nce = list_entry(sctx->name_cache_list.next,
				 struct name_cache_entry, list);
		name_cache_delete(sctx, nce);
	}

static void name_cache_free(struct send_ctx *sctx)
	struct name_cache_entry *nce;

	while (!list_empty(&sctx->name_cache_list)) {
		nce = list_entry(sctx->name_cache_list.next,
				 struct name_cache_entry, list);
		name_cache_delete(sctx, nce);
/*
 * Used by get_cur_path for each ref up to the root.
 * Returns 0 if it succeeded.
 * Returns 1 if the inode does not exist or got overwritten. In that case,
 * the name is an orphan name. This instructs get_cur_path to stop iterating.
 * If 1 is returned, parent_ino/parent_gen are not guaranteed to be valid.
 * Returns <0 in case of error.
 */
static int __get_cur_name_and_parent(struct send_ctx *sctx,
				     u64 ino, u64 gen,
				     u64 *parent_ino,
				     u64 *parent_gen,
				     struct fs_path *dest)
	struct name_cache_entry *nce = NULL;

	/*
	 * First check if we already did a call to this function with the same
	 * ino/gen. If yes, check if the cache entry is still up-to-date. If
	 * yes, return the cached result.
	 */
	nce = name_cache_search(sctx, ino, gen);
	if (nce) {
		if (ino < sctx->send_progress && nce->need_later_update) {
			name_cache_delete(sctx, nce);
		} else {
			name_cache_used(sctx, nce);
			*parent_ino = nce->parent_ino;
			*parent_gen = nce->parent_gen;
			ret = fs_path_add(dest, nce->name, nce->name_len);
		}
	}
	/*
	 * If the inode does not exist yet, add the orphan name and return 1.
	 * This should only happen for the parent dir that we determine in
	 */
	ret = is_inode_existent(sctx, ino, gen);

		ret = gen_unique_name(sctx, ino, gen, dest);

	/*
	 * Depending on whether the inode was already processed or not, use
	 * send_root or parent_root for ref lookup.
	 */
	if (ino < sctx->send_progress)
		ret = get_first_ref(sctx->send_root, ino,
				    parent_ino, parent_gen, dest);
	else
		ret = get_first_ref(sctx->parent_root, ino,
				    parent_ino, parent_gen, dest);

	/*
	 * Check if the ref was overwritten by an inode's ref that was
	 * processed earlier. If yes, treat as orphan and return 1.
	 */
	ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
				dest->start, dest->end - dest->start);

		fs_path_reset(dest);
		ret = gen_unique_name(sctx, ino, gen, dest);

	/*
	 * Store the result of the lookup in the name cache.
	 */
	nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_KERNEL);

	nce->parent_ino = *parent_ino;
	nce->parent_gen = *parent_gen;
	nce->name_len = fs_path_len(dest);
	strcpy(nce->name, dest->start);

	if (ino < sctx->send_progress)
		nce->need_later_update = 0;
	else
		nce->need_later_update = 1;

	nce_ret = name_cache_insert(sctx, nce);
	name_cache_clean_unused(sctx);
/*
 * Magic happens here. This function returns the first ref to an inode as it
 * would look like while receiving the stream at this point in time.
 * We walk the path up to the root. For every inode in between, we check if
 * it was already processed/sent. If yes, we continue with the parent as
 * found in send_root. If not, we continue with the parent as found in
 * parent_root.
 * If we encounter an inode that was deleted at this point in time, we use
 * the inode's "orphan" name instead of the real name and stop. Same with
 * new inodes that were not created yet and overwritten inodes/refs.
 *
 * When do we have orphan inodes:
 * 1. When an inode is freshly created and thus no valid refs are available
 *    yet
 * 2. When a directory lost all its refs (deleted) but still has dir items
 *    inside which were not processed yet (pending for move/delete). If
 *    anyone tried to get the path to the dir items, it would get a path
 *    inside that orphan directory.
 * 3. When an inode is moved around or gets new links, it may overwrite the
 *    ref of an unprocessed inode. If in that case the first ref would be
 *    overwritten, the overwritten inode gets "orphanized". Later when we
 *    process this overwritten inode, it is restored at a new place by
 *    moving the orphan inode.
 *
 * sctx->send_progress tells this function at which point in time receiving
 * is.
 */
static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
			struct fs_path *dest)
	struct fs_path *name = NULL;
	u64 parent_inode = 0;

	name = fs_path_alloc();

	fs_path_reset(dest);

	while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
		struct waiting_dir_move *wdm;

		fs_path_reset(name);

		if (is_waiting_for_rm(sctx, ino)) {
			ret = gen_unique_name(sctx, ino, gen, name);
			ret = fs_path_add_path(dest, name);

		wdm = get_waiting_dir_move(sctx, ino);
		if (wdm && wdm->orphanized) {
			ret = gen_unique_name(sctx, ino, gen, name);
		} else if (wdm) {
			ret = get_first_ref(sctx->parent_root, ino,
					    &parent_inode, &parent_gen, name);
		} else {
			ret = __get_cur_name_and_parent(sctx, ino, gen,
		}

		ret = fs_path_add_path(dest, name);

	fs_path_unreverse(dest);
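/*
 * Illustration: dest is built in reversed mode while walking from the leaf
 * towards the subvolume root. For the inode of "c" in /a/b/c the loop
 * prepends "c", then "b", then "a" into dest, and the final
 * fs_path_unreverse() yields "a/b/c".
 */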
/*
 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
 */
static int send_subvol_begin(struct send_ctx *sctx)
	struct btrfs_root *send_root = sctx->send_root;
	struct btrfs_root *parent_root = sctx->parent_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root_ref *ref;
	struct extent_buffer *leaf;

	path = btrfs_alloc_path();

	name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
	if (!name) {
		btrfs_free_path(path);

	key.objectid = send_root->objectid;
	key.type = BTRFS_ROOT_BACKREF_KEY;

	ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.type != BTRFS_ROOT_BACKREF_KEY ||
	    key.objectid != send_root->objectid) {

	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
	namelen = btrfs_root_ref_name_len(leaf, ref);
	read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
	btrfs_release_path(path);

	if (parent_root) {
		ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
	} else {
		ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
	}

	TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);

	if (!btrfs_is_empty_uuid(sctx->send_root->root_item.received_uuid))
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
			     sctx->send_root->root_item.received_uuid);
	else
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
			     sctx->send_root->root_item.uuid);

	TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
		    le64_to_cpu(sctx->send_root->root_item.ctransid));
	if (parent_root) {
		if (!btrfs_is_empty_uuid(parent_root->root_item.received_uuid))
			TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
				     parent_root->root_item.received_uuid);
		else
			TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
				     parent_root->root_item.uuid);
		TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
			    le64_to_cpu(sctx->parent_root->root_item.ctransid));
	}

	ret = send_cmd(sctx);

	btrfs_free_path(path);
static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
verbose_printk("btrfs: send_truncate %llu size=%llu\n", ino, size);

	p = fs_path_alloc();

	ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);

	ret = get_cur_path(sctx, ino, gen, p);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);

	ret = send_cmd(sctx);
static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
verbose_printk("btrfs: send_chmod %llu mode=%llu\n", ino, mode);

	p = fs_path_alloc();

	ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);

	ret = get_cur_path(sctx, ino, gen, p);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);

	ret = send_cmd(sctx);
static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
verbose_printk("btrfs: send_chown %llu uid=%llu, gid=%llu\n", ino, uid, gid);

	p = fs_path_alloc();

	ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);

	ret = get_cur_path(sctx, ino, gen, p);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);

	ret = send_cmd(sctx);
static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
	struct fs_path *p = NULL;
	struct btrfs_inode_item *ii;
	struct btrfs_path *path = NULL;
	struct extent_buffer *eb;
	struct btrfs_key key;

verbose_printk("btrfs: send_utimes %llu\n", ino);

	p = fs_path_alloc();

	path = alloc_path_for_send();

	key.type = BTRFS_INODE_ITEM_KEY;
	ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);

	eb = path->nodes[0];
	slot = path->slots[0];
	ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);

	ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);

	ret = get_cur_path(sctx, ino, gen, p);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime);
	/* TODO Add otime support when the otime patches get into upstream */

	ret = send_cmd(sctx);

	btrfs_free_path(path);
/*
 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
 * a valid path yet because we did not process the refs yet. So, the inode
 * is created as an orphan.
 */
static int send_create_inode(struct send_ctx *sctx, u64 ino)
verbose_printk("btrfs: send_create_inode %llu\n", ino);

	p = fs_path_alloc();

	if (ino != sctx->cur_ino) {
		ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode,
	} else {
		gen = sctx->cur_inode_gen;
		mode = sctx->cur_inode_mode;
		rdev = sctx->cur_inode_rdev;
	}

	if (S_ISREG(mode)) {
		cmd = BTRFS_SEND_C_MKFILE;
	} else if (S_ISDIR(mode)) {
		cmd = BTRFS_SEND_C_MKDIR;
	} else if (S_ISLNK(mode)) {
		cmd = BTRFS_SEND_C_SYMLINK;
	} else if (S_ISCHR(mode) || S_ISBLK(mode)) {
		cmd = BTRFS_SEND_C_MKNOD;
	} else if (S_ISFIFO(mode)) {
		cmd = BTRFS_SEND_C_MKFIFO;
	} else if (S_ISSOCK(mode)) {
		cmd = BTRFS_SEND_C_MKSOCK;
	} else {
		btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o",
			   (int)(mode & S_IFMT));
	}

	ret = begin_cmd(sctx, cmd);

	ret = gen_unique_name(sctx, ino, gen, p);

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino);

	if (S_ISLNK(mode)) {
		ret = read_symlink(sctx->send_root, ino, p);
		TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
	} else if (S_ISCHR(mode) || S_ISBLK(mode) ||
		   S_ISFIFO(mode) || S_ISSOCK(mode)) {
		TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
		TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
	}

	ret = send_cmd(sctx);
/*
 * We need some special handling for inodes that get processed before the
 * parent directory got created. See process_recorded_refs for details.
 * This function checks if we already created the dir out of order.
 */
static int did_create_dir(struct send_ctx *sctx, u64 dir)
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key di_key;
	struct extent_buffer *eb;
	struct btrfs_dir_item *di;

	path = alloc_path_for_send();

	key.type = BTRFS_DIR_INDEX_KEY;

	ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);

		eb = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(sctx->send_root, path);
			} else if (ret > 0) {

		btrfs_item_key_to_cpu(eb, &found_key, slot);
		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type) {

		di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
		btrfs_dir_item_key_to_cpu(eb, di, &di_key);

		if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
		    di_key.objectid < sctx->send_progress) {

	btrfs_free_path(path);
/*
 * Only creates the inode if it is:
 * 1. Not a directory
 * 2. Or a directory which was not created already due to out of order
 *    directories. See did_create_dir and process_recorded_refs for details.
 */
static int send_create_inode_if_needed(struct send_ctx *sctx)
	if (S_ISDIR(sctx->cur_inode_mode)) {
		ret = did_create_dir(sctx, sctx->cur_ino);
	}

	ret = send_create_inode(sctx, sctx->cur_ino);
struct recorded_ref {
	struct list_head list;
	struct fs_path *full_path;
/*
 * We need to process new refs before deleted refs, but compare_tree gives us
 * everything mixed. So we first record all refs and later process them.
 * This function is a helper to record one ref.
 */
static int __record_ref(struct list_head *head, u64 dir,
			u64 dir_gen, struct fs_path *path)
	struct recorded_ref *ref;

	ref = kmalloc(sizeof(*ref), GFP_KERNEL);

	ref->dir_gen = dir_gen;
	ref->full_path = path;

	ref->name = (char *)kbasename(ref->full_path->start);
	ref->name_len = ref->full_path->end - ref->name;
	ref->dir_path = ref->full_path->start;
	if (ref->name == ref->full_path->start)
		ref->dir_path_len = 0;
	else
		ref->dir_path_len = ref->full_path->end -
				    ref->full_path->start - 1 - ref->name_len;

	list_add_tail(&ref->list, head);
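/*
 * Example of the split above: for a full_path of "a/b/c", name points at
 * "c" (name_len 1), dir_path points at the start of the buffer, and
 * dir_path_len is 3, i.e. "a/b" without the trailing slash. For a
 * top-level name like "c", name == full_path->start and dir_path_len is 0.
 */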
static int dup_ref(struct recorded_ref *ref, struct list_head *list)
	struct recorded_ref *new;

	new = kmalloc(sizeof(*ref), GFP_KERNEL);

	new->dir = ref->dir;
	new->dir_gen = ref->dir_gen;
	new->full_path = NULL;
	INIT_LIST_HEAD(&new->list);
	list_add_tail(&new->list, list);
static void __free_recorded_refs(struct list_head *head)
	struct recorded_ref *cur;

	while (!list_empty(head)) {
		cur = list_entry(head->next, struct recorded_ref, list);
		fs_path_free(cur->full_path);
		list_del(&cur->list);
	}

static void free_recorded_refs(struct send_ctx *sctx)
	__free_recorded_refs(&sctx->new_refs);
	__free_recorded_refs(&sctx->deleted_refs);
/*
 * Renames/moves a file/dir to its orphan name. Used when the first
 * ref of an unprocessed inode gets overwritten and for all non-empty
 * directories.
 */
static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
			   struct fs_path *path)
	struct fs_path *orphan;

	orphan = fs_path_alloc();

	ret = gen_unique_name(sctx, ino, gen, orphan);

	ret = send_rename(sctx, path, orphan);

	fs_path_free(orphan);
static struct orphan_dir_info *
add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
	struct rb_node **p = &sctx->orphan_dirs.rb_node;
	struct rb_node *parent = NULL;
	struct orphan_dir_info *entry, *odi;

	odi = kmalloc(sizeof(*odi), GFP_KERNEL);
	if (!odi)
		return ERR_PTR(-ENOMEM);

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct orphan_dir_info, node);
		if (dir_ino < entry->ino) {
			p = &(*p)->rb_left;
		} else if (dir_ino > entry->ino) {
			p = &(*p)->rb_right;
		}
	}

	rb_link_node(&odi->node, parent, p);
	rb_insert_color(&odi->node, &sctx->orphan_dirs);
2846 static struct orphan_dir_info *
2847 get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
2849 struct rb_node *n = sctx->orphan_dirs.rb_node;
2850 struct orphan_dir_info *entry;
2853 entry = rb_entry(n, struct orphan_dir_info, node);
2854 if (dir_ino < entry->ino)
2856 else if (dir_ino > entry->ino)
2864 static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino)
2866 struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino);
2871 static void free_orphan_dir_info(struct send_ctx *sctx,
2872 struct orphan_dir_info *odi)
2876 rb_erase(&odi->node, &sctx->orphan_dirs);
/*
 * Returns 1 if a directory can be removed at this point in time.
 * We check this by iterating all dir items and checking if the inode behind
 * the dir item was already processed.
 */
static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
		     u64 send_progress)
	struct btrfs_root *root = sctx->parent_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key loc;
	struct btrfs_dir_item *di;

	/*
	 * Don't try to rmdir the top/root subvolume dir.
	 */
	if (dir == BTRFS_FIRST_FREE_OBJECTID)

	path = alloc_path_for_send();

	key.type = BTRFS_DIR_INDEX_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

	while (1) {
		struct waiting_dir_move *dm;

		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
		}

		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type)

		di = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_dir_item);
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);

		dm = get_waiting_dir_move(sctx, loc.objectid);
		if (dm) {
			struct orphan_dir_info *odi;

			odi = add_orphan_dir_info(sctx, dir);
			dm->rmdir_ino = dir;
		}

		if (loc.objectid > send_progress) {
			struct orphan_dir_info *odi;

			odi = get_orphan_dir_info(sctx, dir);
			free_orphan_dir_info(sctx, odi);
		}

	btrfs_free_path(path);
static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
	struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino);

	return entry != NULL;

static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
	struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
	struct rb_node *parent = NULL;
	struct waiting_dir_move *entry, *dm;

	dm = kmalloc(sizeof(*dm), GFP_KERNEL);
	dm->orphanized = orphanized;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct waiting_dir_move, node);
		if (ino < entry->ino) {
			p = &(*p)->rb_left;
		} else if (ino > entry->ino) {
			p = &(*p)->rb_right;
		}
	}

	rb_link_node(&dm->node, parent, p);
	rb_insert_color(&dm->node, &sctx->waiting_dir_moves);
3006 static struct waiting_dir_move *
3007 get_waiting_dir_move(struct send_ctx *sctx, u64 ino)
3009 struct rb_node *n = sctx->waiting_dir_moves.rb_node;
3010 struct waiting_dir_move *entry;
3013 entry = rb_entry(n, struct waiting_dir_move, node);
3014 if (ino < entry->ino)
3016 else if (ino > entry->ino)
3024 static void free_waiting_dir_move(struct send_ctx *sctx,
3025 struct waiting_dir_move *dm)
3029 rb_erase(&dm->node, &sctx->waiting_dir_moves);
3033 static int add_pending_dir_move(struct send_ctx *sctx,
3037 struct list_head *new_refs,
3038 struct list_head *deleted_refs,
3039 const bool is_orphan)
3041 struct rb_node **p = &sctx->pending_dir_moves.rb_node;
3042 struct rb_node *parent = NULL;
3043 struct pending_dir_move *entry = NULL, *pm;
3044 struct recorded_ref *cur;
3048 pm = kmalloc(sizeof(*pm), GFP_KERNEL);
3051 pm->parent_ino = parent_ino;
3054 INIT_LIST_HEAD(&pm->list);
3055 INIT_LIST_HEAD(&pm->update_refs);
3056 RB_CLEAR_NODE(&pm->node);
3060 entry = rb_entry(parent, struct pending_dir_move, node);
3061 if (parent_ino < entry->parent_ino) {
3063 } else if (parent_ino > entry->parent_ino) {
3064 p = &(*p)->rb_right;
3071 list_for_each_entry(cur, deleted_refs, list) {
3072 ret = dup_ref(cur, &pm->update_refs);
3076 list_for_each_entry(cur, new_refs, list) {
3077 ret = dup_ref(cur, &pm->update_refs);
3082 ret = add_waiting_dir_move(sctx, pm->ino, is_orphan);
3087 list_add_tail(&pm->list, &entry->list);
3089 rb_link_node(&pm->node, parent, p);
3090 rb_insert_color(&pm->node, &sctx->pending_dir_moves);
3095 __free_recorded_refs(&pm->update_refs);
3101 static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
3104 struct rb_node *n = sctx->pending_dir_moves.rb_node;
3105 struct pending_dir_move *entry;
3108 entry = rb_entry(n, struct pending_dir_move, node);
3109 if (parent_ino < entry->parent_ino)
3111 else if (parent_ino > entry->parent_ino)
3119 static int path_loop(struct send_ctx *sctx, struct fs_path *name,
3120 u64 ino, u64 gen, u64 *ancestor_ino)
3123 u64 parent_inode = 0;
3125 u64 start_ino = ino;
3128 while (ino != BTRFS_FIRST_FREE_OBJECTID) {
3129 fs_path_reset(name);
3131 if (is_waiting_for_rm(sctx, ino))
3133 if (is_waiting_for_move(sctx, ino)) {
3134 if (*ancestor_ino == 0)
3135 *ancestor_ino = ino;
3136 ret = get_first_ref(sctx->parent_root, ino,
3137 &parent_inode, &parent_gen, name);
3139 ret = __get_cur_name_and_parent(sctx, ino, gen,
3149 if (parent_inode == start_ino) {
3151 if (*ancestor_ino == 0)
3152 *ancestor_ino = ino;
3161 static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
3163 struct fs_path *from_path = NULL;
3164 struct fs_path *to_path = NULL;
3165 struct fs_path *name = NULL;
3166 u64 orig_progress = sctx->send_progress;
3167 struct recorded_ref *cur;
3168 u64 parent_ino, parent_gen;
3169 struct waiting_dir_move *dm = NULL;
3175 name = fs_path_alloc();
3176 from_path = fs_path_alloc();
3177 if (!name || !from_path) {
3182 dm = get_waiting_dir_move(sctx, pm->ino);
3184 rmdir_ino = dm->rmdir_ino;
3185 is_orphan = dm->orphanized;
3186 free_waiting_dir_move(sctx, dm);
3189 ret = gen_unique_name(sctx, pm->ino,
3190 pm->gen, from_path);
3192 ret = get_first_ref(sctx->parent_root, pm->ino,
3193 &parent_ino, &parent_gen, name);
3196 ret = get_cur_path(sctx, parent_ino, parent_gen,
3200 ret = fs_path_add_path(from_path, name);
3205 sctx->send_progress = sctx->cur_ino + 1;
3206 ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor);
3210 LIST_HEAD(deleted_refs);
3211 ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
3212 ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
3213 &pm->update_refs, &deleted_refs,
3218 dm = get_waiting_dir_move(sctx, pm->ino);
3220 dm->rmdir_ino = rmdir_ino;
3224 fs_path_reset(name);
3227 ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
3231 ret = send_rename(sctx, from_path, to_path);
3236 struct orphan_dir_info *odi;
3238 odi = get_orphan_dir_info(sctx, rmdir_ino);
3240 /* already deleted */
3243 ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino);
3249 name = fs_path_alloc();
3254 ret = get_cur_path(sctx, rmdir_ino, odi->gen, name);
3257 ret = send_rmdir(sctx, name);
3260 free_orphan_dir_info(sctx, odi);
3264 ret = send_utimes(sctx, pm->ino, pm->gen);
3269 * After rename/move, need to update the utimes of both new parent(s)
3270 * and old parent(s).
3272 list_for_each_entry(cur, &pm->update_refs, list) {
3274 * The parent inode might have been deleted in the send snapshot
3276 ret = get_inode_info(sctx->send_root, cur->dir, NULL,
3277 NULL, NULL, NULL, NULL, NULL);
3278 if (ret == -ENOENT) {
3285 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
3292 fs_path_free(from_path);
3293 fs_path_free(to_path);
3294 sctx->send_progress = orig_progress;
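 * Sketch of a delayed move being applied (hypothetical inodes): once dir
 * 261 becomes movable, from_path is either its orphan name (e.g.
 * "o261-7-0") or its old location taken from the parent root, the rename
 * to the final path is emitted, and a rmdir that was blocked on 261
 * (dm->rmdir_ino) is retried right afterwards.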
3299 static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
3301 if (!list_empty(&m->list))
3303 if (!RB_EMPTY_NODE(&m->node))
3304 rb_erase(&m->node, &sctx->pending_dir_moves);
3305 __free_recorded_refs(&m->update_refs);
3309 static void tail_append_pending_moves(struct pending_dir_move *moves,
3310 struct list_head *stack)
3312 if (list_empty(&moves->list)) {
3313 list_add_tail(&moves->list, stack);
3316 list_splice_init(&moves->list, &list);
3317 list_add_tail(&moves->list, stack);
3318 list_splice_tail(&list, stack);
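 * Ordering sketch (hypothetical inodes): pending moves that share a
 * parent are chained on the rb-tree entry's own list. If inode 259 owns
 * the entry and moves for inodes 263 and 264 were chained onto it, the
 * stack receives 259 first, then 263 and 264, so the entry is always
 * applied before the moves queued behind it.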
3322 static int apply_children_dir_moves(struct send_ctx *sctx)
3324 struct pending_dir_move *pm;
3325 struct list_head stack;
3326 u64 parent_ino = sctx->cur_ino;
3329 pm = get_pending_dir_moves(sctx, parent_ino);
3333 INIT_LIST_HEAD(&stack);
3334 tail_append_pending_moves(pm, &stack);
3336 while (!list_empty(&stack)) {
3337 pm = list_first_entry(&stack, struct pending_dir_move, list);
3338 parent_ino = pm->ino;
3339 ret = apply_dir_move(sctx, pm);
3340 free_pending_move(sctx, pm);
3343 pm = get_pending_dir_moves(sctx, parent_ino);
3345 tail_append_pending_moves(pm, &stack);
3350 while (!list_empty(&stack)) {
3351 pm = list_first_entry(&stack, struct pending_dir_move, list);
3352 free_pending_move(sctx, pm);
3358 * We might need to delay a directory rename even when no ancestor directory
3359 * (in the send root) with a higher inode number than ours (sctx->cur_ino) was
3360 * renamed. This happens when we rename a directory to the old name (the name
3361 * in the parent root) of some other unrelated directory that got its rename
3362 * delayed due to some ancestor with higher number that got renamed.
3368 * |---- a/ (ino 257)
3369 * | |---- file (ino 260)
3371 * |---- b/ (ino 258)
3372 * |---- c/ (ino 259)
3376 * |---- a/ (ino 258)
3377 * |---- x/ (ino 259)
3378 * |---- y/ (ino 257)
3379 * |----- file (ino 260)
3381 * Here we cannot rename 258 from 'b' to 'a' without the rename of inode 257
3382 * from 'a' to 'x/y' happening first, which in turn depends on the rename of
3383 * inode 259 from 'c' to 'x'. So the order of rename commands the send stream must use is:
3386 * 1 - rename 259 from 'c' to 'x'
3387 * 2 - rename 257 from 'a' to 'x/y'
3388 * 3 - rename 258 from 'b' to 'a'
3390 * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can
3391 * be done right away and < 0 on error.
3393 static int wait_for_dest_dir_move(struct send_ctx *sctx,
3394 struct recorded_ref *parent_ref,
3395 const bool is_orphan)
3397 struct btrfs_path *path;
3398 struct btrfs_key key;
3399 struct btrfs_key di_key;
3400 struct btrfs_dir_item *di;
3404 struct waiting_dir_move *wdm;
3406 if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
3409 path = alloc_path_for_send();
3413 key.objectid = parent_ref->dir;
3414 key.type = BTRFS_DIR_ITEM_KEY;
3415 key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len);
3417 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
3420 } else if (ret > 0) {
3425 di = btrfs_match_dir_item_name(sctx->parent_root, path,
3426 parent_ref->name, parent_ref->name_len);
3432 * di_key.objectid has the number of the inode that has a dentry in the
3433 * parent directory with the same name that sctx->cur_ino is being
3434 * renamed to. We need to check if that inode is in the send root as
3435 * well and if it is currently marked as an inode with a pending rename,
3436 * if it is, we need to delay the rename of sctx->cur_ino as well, so
3437 * that it happens after that other inode is renamed.
3439 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
3440 if (di_key.type != BTRFS_INODE_ITEM_KEY) {
3445 ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL,
3446 &left_gen, NULL, NULL, NULL, NULL);
3449 ret = get_inode_info(sctx->send_root, di_key.objectid, NULL,
3450 &right_gen, NULL, NULL, NULL, NULL);
3457 /* Different inode, no need to delay the rename of sctx->cur_ino */
3458 if (right_gen != left_gen) {
3463 wdm = get_waiting_dir_move(sctx, di_key.objectid);
3464 if (wdm && !wdm->orphanized) {
3465 ret = add_pending_dir_move(sctx,
3467 sctx->cur_inode_gen,
3470 &sctx->deleted_refs,
3476 btrfs_free_path(path);
3481 * Check if inode ino1 is an ancestor of inode ino2 in the given root.
3482 * Return 1 if true, 0 if false and < 0 on error.
3484 static int is_ancestor(struct btrfs_root *root,
3488 struct fs_path *fs_path)
3492 while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3497 fs_path_reset(fs_path);
3498 ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
3500 if (ret == -ENOENT && ino == ino2)
3505 return parent_gen == ino1_gen ? 1 : 0;
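 * Illustrative walk (hypothetical layout): with "a" being inode 258 and
 * "a/b/c" being inode 261, is_ancestor(root, 258, gen, 261, ...) climbs
 * 261 -> "b" -> "a" via get_first_ref() and returns 1 as soon as the
 * parent chain reaches inode 258 with a matching generation.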
3511 static int wait_for_parent_move(struct send_ctx *sctx,
3512 struct recorded_ref *parent_ref,
3513 const bool is_orphan)
3516 u64 ino = parent_ref->dir;
3517 u64 parent_ino_before, parent_ino_after;
3518 struct fs_path *path_before = NULL;
3519 struct fs_path *path_after = NULL;
3522 path_after = fs_path_alloc();
3523 path_before = fs_path_alloc();
3524 if (!path_after || !path_before) {
3530 * Our current directory inode may not yet be renamed/moved because some
3531 * ancestor (immediate or not) has to be renamed/moved first. So find if
3532 * such ancestor exists and make sure our own rename/move happens after
3533 * that ancestor is processed to avoid path build infinite loops (done
3534 * at get_cur_path()).
3536 while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3537 if (is_waiting_for_move(sctx, ino)) {
3539 * If the current inode is an ancestor of ino in the
3540 * parent root, we need to delay the rename of the
3541 * current inode, otherwise don't delay the rename
3542 * because we can end up with a circular dependency
3543 * of renames, resulting in some directories never
3544 * getting the respective rename operations issued in
3545 * the send stream or getting into infinite path build loops.
3548 ret = is_ancestor(sctx->parent_root,
3549 sctx->cur_ino, sctx->cur_inode_gen,
3555 fs_path_reset(path_before);
3556 fs_path_reset(path_after);
3558 ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
3562 ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
3564 if (ret < 0 && ret != -ENOENT) {
3566 } else if (ret == -ENOENT) {
3571 len1 = fs_path_len(path_before);
3572 len2 = fs_path_len(path_after);
3573 if (ino > sctx->cur_ino &&
3574 (parent_ino_before != parent_ino_after || len1 != len2 ||
3575 memcmp(path_before->start, path_after->start, len1))) {
3579 ino = parent_ino_after;
3583 fs_path_free(path_before);
3584 fs_path_free(path_after);
3587 ret = add_pending_dir_move(sctx,
3589 sctx->cur_inode_gen,
3592 &sctx->deleted_refs,
3602 * This does all the move/link/unlink/rmdir magic.
3604 static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
3607 struct recorded_ref *cur;
3608 struct recorded_ref *cur2;
3609 struct list_head check_dirs;
3610 struct fs_path *valid_path = NULL;
3613 int did_overwrite = 0;
3615 u64 last_dir_ino_rm = 0;
3616 bool can_rename = true;
3618 verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
3621 * This should never happen as the root dir always has the same ref
3622 * which is always '..'
3624 BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
3625 INIT_LIST_HEAD(&check_dirs);
3627 valid_path = fs_path_alloc();
3634 * First, check if the first ref of the current inode was overwritten
3635 * before. If yes, we know that the current inode was already orphanized
3636 * and thus use the orphan name. If not, we can use get_cur_path to
3637 * get the path of the first ref as it would look like while receiving at
3638 * this point in time.
3639 * New inodes are always orphan at the beginning, so force to use the
3640 * orphan name in this case.
3641 * The first ref is stored in valid_path and will be updated if it
3642 * gets moved around.
3644 if (!sctx->cur_inode_new) {
3645 ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
3646 sctx->cur_inode_gen);
3652 if (sctx->cur_inode_new || did_overwrite) {
3653 ret = gen_unique_name(sctx, sctx->cur_ino,
3654 sctx->cur_inode_gen, valid_path);
3659 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
3665 list_for_each_entry(cur, &sctx->new_refs, list) {
3667 * We may have refs where the parent directory does not exist
3668 * yet. This happens if the parent directory's inum is higher
3669 * than the current inum. To handle this case, we create the
3670 * parent directory out of order. But we need to check if this
3671 * did already happen before due to other refs in the same dir.
3673 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
3676 if (ret == inode_state_will_create) {
3679 * First check if any of the current inode's refs did
3680 * already create the dir.
3682 list_for_each_entry(cur2, &sctx->new_refs, list) {
3685 if (cur2->dir == cur->dir) {
3692 * If that did not happen, check if a previous inode
3693 * did already create the dir.
3696 ret = did_create_dir(sctx, cur->dir);
3700 ret = send_create_inode(sctx, cur->dir);
3707 * Check if this new ref would overwrite the first ref of
3708 * another unprocessed inode. If yes, orphanize the
3709 * overwritten inode. If we find an overwritten ref that is
3710 * not the first ref, simply unlink it.
3712 ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
3713 cur->name, cur->name_len,
3714 &ow_inode, &ow_gen);
3718 ret = is_first_ref(sctx->parent_root,
3719 ow_inode, cur->dir, cur->name,
3724 struct name_cache_entry *nce;
3725 struct waiting_dir_move *wdm;
3727 ret = orphanize_inode(sctx, ow_inode, ow_gen,
3733 * If ow_inode has its rename operation delayed,
3734 * make sure that its orphanized name is used in
3735 * the source path when performing its rename operation.
3738 if (is_waiting_for_move(sctx, ow_inode)) {
3739 wdm = get_waiting_dir_move(sctx,
3742 wdm->orphanized = true;
3746 * Make sure we clear our orphanized inode's
3747 * name from the name cache. This is because the
3748 * inode ow_inode might be an ancestor of some
3749 * other inode that will be orphanized as well
3750 * later and has an inode number greater than
3751 * sctx->send_progress. We need to prevent
3752 * future name lookups from using the old name
3753 * and make them use the orphan name instead.
3755 nce = name_cache_search(sctx, ow_inode, ow_gen);
3757 name_cache_delete(sctx, nce);
3762 * ow_inode might currently be an ancestor of
3763 * cur_ino, therefore compute valid_path (the
3764 * current path of cur_ino) again because it
3765 * might contain the pre-orphanization name of
3766 * ow_inode, which is no longer valid.
3768 fs_path_reset(valid_path);
3769 ret = get_cur_path(sctx, sctx->cur_ino,
3770 sctx->cur_inode_gen, valid_path);
3774 ret = send_unlink(sctx, cur->full_path);
3780 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) {
3781 ret = wait_for_dest_dir_move(sctx, cur, is_orphan);
3790 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root &&
3792 ret = wait_for_parent_move(sctx, cur, is_orphan);
3802 * link/move the ref to the new place. If we have an orphan
3803 * inode, move it and update valid_path. If not, link or move
3804 * it depending on the inode mode.
3806 if (is_orphan && can_rename) {
3807 ret = send_rename(sctx, valid_path, cur->full_path);
3811 ret = fs_path_copy(valid_path, cur->full_path);
3814 } else if (can_rename) {
3815 if (S_ISDIR(sctx->cur_inode_mode)) {
3817 * Dirs can't be linked, so move the dir instead. For moved
3818 * dirs, we always have one new and one deleted
3819 * ref. The deleted ref is ignored later.
3821 ret = send_rename(sctx, valid_path,
3824 ret = fs_path_copy(valid_path,
3829 ret = send_link(sctx, cur->full_path,
3835 ret = dup_ref(cur, &check_dirs);
3840 if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
3842 * Check if we can already rmdir the directory. If not,
3843 * orphanize it. For every dir item inside that gets deleted
3844 * later, we do this check again and rmdir it then if possible.
3845 * See the use of check_dirs for more details.
3847 ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen,
3852 ret = send_rmdir(sctx, valid_path);
3855 } else if (!is_orphan) {
3856 ret = orphanize_inode(sctx, sctx->cur_ino,
3857 sctx->cur_inode_gen, valid_path);
3863 list_for_each_entry(cur, &sctx->deleted_refs, list) {
3864 ret = dup_ref(cur, &check_dirs);
3868 } else if (S_ISDIR(sctx->cur_inode_mode) &&
3869 !list_empty(&sctx->deleted_refs)) {
3871 * We have a moved dir. Add the old parent to check_dirs
3873 cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
3875 ret = dup_ref(cur, &check_dirs);
3878 } else if (!S_ISDIR(sctx->cur_inode_mode)) {
3880 * We have a non-dir inode. Go through all deleted refs and
3881 * unlink them if they were not already overwritten by other inodes.
3884 list_for_each_entry(cur, &sctx->deleted_refs, list) {
3885 ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
3886 sctx->cur_ino, sctx->cur_inode_gen,
3887 cur->name, cur->name_len);
3891 ret = send_unlink(sctx, cur->full_path);
3895 ret = dup_ref(cur, &check_dirs);
3900 * If the inode is still orphan, unlink the orphan. This may
3901 * happen when a previous inode did overwrite the first ref
3902 * of this inode and no new refs were added for the current
3903 * inode. Unlinking does not mean that the inode is deleted in
3904 * all cases. There may still be links to this inode in other places.
3908 ret = send_unlink(sctx, valid_path);
3915 * We did collect all parent dirs where cur_inode was once located. We
3916 * now go through all these dirs and check if they are pending for
3917 * deletion and if it's finally possible to perform the rmdir now.
3918 * We also update the inode stats of the parent dirs here.
3920 list_for_each_entry(cur, &check_dirs, list) {
3922 * In case we had refs into dirs that were not processed yet,
3923 * we don't need to do the utime and rmdir logic for these dirs.
3924 * The dir will be processed later.
3926 if (cur->dir > sctx->cur_ino)
3929 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
3933 if (ret == inode_state_did_create ||
3934 ret == inode_state_no_change) {
3935 /* TODO delayed utimes */
3936 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
3939 } else if (ret == inode_state_did_delete &&
3940 cur->dir != last_dir_ino_rm) {
3941 ret = can_rmdir(sctx, cur->dir, cur->dir_gen,
3946 ret = get_cur_path(sctx, cur->dir,
3947 cur->dir_gen, valid_path);
3950 ret = send_rmdir(sctx, valid_path);
3953 last_dir_ino_rm = cur->dir;
3961 __free_recorded_refs(&check_dirs);
3962 free_recorded_refs(sctx);
3963 fs_path_free(valid_path);
3967 static int record_ref(struct btrfs_root *root, int num, u64 dir, int index,
3968 struct fs_path *name, void *ctx, struct list_head *refs)
3971 struct send_ctx *sctx = ctx;
3975 p = fs_path_alloc();
3979 ret = get_inode_info(root, dir, NULL, &gen, NULL, NULL,
3984 ret = get_cur_path(sctx, dir, gen, p);
3987 ret = fs_path_add_path(p, name);
3991 ret = __record_ref(refs, dir, gen, p);
3999 static int __record_new_ref(int num, u64 dir, int index,
4000 struct fs_path *name,
4003 struct send_ctx *sctx = ctx;
4004 return record_ref(sctx->send_root, num, dir, index, name,
4005 ctx, &sctx->new_refs);
4009 static int __record_deleted_ref(int num, u64 dir, int index,
4010 struct fs_path *name,
4013 struct send_ctx *sctx = ctx;
4014 return record_ref(sctx->parent_root, num, dir, index, name,
4015 ctx, &sctx->deleted_refs);
4018 static int record_new_ref(struct send_ctx *sctx)
4022 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4023 sctx->cmp_key, 0, __record_new_ref, sctx);
4032 static int record_deleted_ref(struct send_ctx *sctx)
4036 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4037 sctx->cmp_key, 0, __record_deleted_ref, sctx);
4046 struct find_ref_ctx {
4049 struct btrfs_root *root;
4050 struct fs_path *name;
4054 static int __find_iref(int num, u64 dir, int index,
4055 struct fs_path *name,
4058 struct find_ref_ctx *ctx = ctx_;
4062 if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) &&
4063 strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) {
4065 * To avoid doing extra lookups we'll only do this if everything else matches.
4068 ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL,
4072 if (dir_gen != ctx->dir_gen)
4074 ctx->found_idx = num;
4080 static int find_iref(struct btrfs_root *root,
4081 struct btrfs_path *path,
4082 struct btrfs_key *key,
4083 u64 dir, u64 dir_gen, struct fs_path *name)
4086 struct find_ref_ctx ctx;
4090 ctx.dir_gen = dir_gen;
4094 ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx);
4098 if (ctx.found_idx == -1)
4101 return ctx.found_idx;
4104 static int __record_changed_new_ref(int num, u64 dir, int index,
4105 struct fs_path *name,
4110 struct send_ctx *sctx = ctx;
4112 ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,
4117 ret = find_iref(sctx->parent_root, sctx->right_path,
4118 sctx->cmp_key, dir, dir_gen, name);
4120 ret = __record_new_ref(num, dir, index, name, sctx);
4127 static int __record_changed_deleted_ref(int num, u64 dir, int index,
4128 struct fs_path *name,
4133 struct send_ctx *sctx = ctx;
4135 ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,
4140 ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
4141 dir, dir_gen, name);
4143 ret = __record_deleted_ref(num, dir, index, name, sctx);
4150 static int record_changed_ref(struct send_ctx *sctx)
4154 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4155 sctx->cmp_key, 0, __record_changed_new_ref, sctx);
4158 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4159 sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
4169 * Record and process all refs at once. Needed when an inode changes the
4170 * generation number, which means that it was deleted and recreated.
4172 static int process_all_refs(struct send_ctx *sctx,
4173 enum btrfs_compare_tree_result cmd)
4176 struct btrfs_root *root;
4177 struct btrfs_path *path;
4178 struct btrfs_key key;
4179 struct btrfs_key found_key;
4180 struct extent_buffer *eb;
4182 iterate_inode_ref_t cb;
4183 int pending_move = 0;
4185 path = alloc_path_for_send();
4189 if (cmd == BTRFS_COMPARE_TREE_NEW) {
4190 root = sctx->send_root;
4191 cb = __record_new_ref;
4192 } else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
4193 root = sctx->parent_root;
4194 cb = __record_deleted_ref;
4196 btrfs_err(sctx->send_root->fs_info,
4197 "Wrong command %d in process_all_refs", cmd);
4202 key.objectid = sctx->cmp_key->objectid;
4203 key.type = BTRFS_INODE_REF_KEY;
4205 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4210 eb = path->nodes[0];
4211 slot = path->slots[0];
4212 if (slot >= btrfs_header_nritems(eb)) {
4213 ret = btrfs_next_leaf(root, path);
4221 btrfs_item_key_to_cpu(eb, &found_key, slot);
4223 if (found_key.objectid != key.objectid ||
4224 (found_key.type != BTRFS_INODE_REF_KEY &&
4225 found_key.type != BTRFS_INODE_EXTREF_KEY))
4228 ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
4234 btrfs_release_path(path);
4236 ret = process_recorded_refs(sctx, &pending_move);
4237 /* Only applicable to an incremental send. */
4238 ASSERT(pending_move == 0);
4241 btrfs_free_path(path);
4245 static int send_set_xattr(struct send_ctx *sctx,
4246 struct fs_path *path,
4247 const char *name, int name_len,
4248 const char *data, int data_len)
4252 ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
4256 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4257 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4258 TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);
4260 ret = send_cmd(sctx);
4267 static int send_remove_xattr(struct send_ctx *sctx,
4268 struct fs_path *path,
4269 const char *name, int name_len)
4273 ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
4277 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4278 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4280 ret = send_cmd(sctx);
4287 static int __process_new_xattr(int num, struct btrfs_key *di_key,
4288 const char *name, int name_len,
4289 const char *data, int data_len,
4293 struct send_ctx *sctx = ctx;
4295 posix_acl_xattr_header dummy_acl;
4297 p = fs_path_alloc();
4302 * This hack is needed because empty acls are stored as zero byte
4303 * data in xattrs. The problem is that receiving these zero byte
4304 * acls will fail later. To fix this, we send a dummy acl list that
4305 * only contains the version number and no entries.
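 *
 * In stream terms (a sketch): an empty "system.posix_acl_access" xattr
 * is sent as a 4 byte payload holding just the little-endian
 * POSIX_ACL_XATTR_VERSION header, instead of a zero length value that
 * would make the receiver's setxattr fail.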
4307 if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
4308 !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
4309 if (data_len == 0) {
4310 dummy_acl.a_version =
4311 cpu_to_le32(POSIX_ACL_XATTR_VERSION);
4312 data = (char *)&dummy_acl;
4313 data_len = sizeof(dummy_acl);
4317 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4321 ret = send_set_xattr(sctx, p, name, name_len, data, data_len);
4328 static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
4329 const char *name, int name_len,
4330 const char *data, int data_len,
4334 struct send_ctx *sctx = ctx;
4337 p = fs_path_alloc();
4341 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4345 ret = send_remove_xattr(sctx, p, name, name_len);
4352 static int process_new_xattr(struct send_ctx *sctx)
4356 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4357 sctx->cmp_key, __process_new_xattr, sctx);
4362 static int process_deleted_xattr(struct send_ctx *sctx)
4366 ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
4367 sctx->cmp_key, __process_deleted_xattr, sctx);
4372 struct find_xattr_ctx {
4380 static int __find_xattr(int num, struct btrfs_key *di_key,
4381 const char *name, int name_len,
4382 const char *data, int data_len,
4383 u8 type, void *vctx)
4385 struct find_xattr_ctx *ctx = vctx;
4387 if (name_len == ctx->name_len &&
4388 strncmp(name, ctx->name, name_len) == 0) {
4389 ctx->found_idx = num;
4390 ctx->found_data_len = data_len;
4391 ctx->found_data = kmemdup(data, data_len, GFP_KERNEL);
4392 if (!ctx->found_data)
4399 static int find_xattr(struct btrfs_root *root,
4400 struct btrfs_path *path,
4401 struct btrfs_key *key,
4402 const char *name, int name_len,
4403 char **data, int *data_len)
4406 struct find_xattr_ctx ctx;
4409 ctx.name_len = name_len;
4411 ctx.found_data = NULL;
4412 ctx.found_data_len = 0;
4414 ret = iterate_dir_item(root, path, key, __find_xattr, &ctx);
4418 if (ctx.found_idx == -1)
4421 *data = ctx.found_data;
4422 *data_len = ctx.found_data_len;
4424 kfree(ctx.found_data);
4426 return ctx.found_idx;
4430 static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
4431 const char *name, int name_len,
4432 const char *data, int data_len,
4436 struct send_ctx *sctx = ctx;
4437 char *found_data = NULL;
4438 int found_data_len = 0;
4440 ret = find_xattr(sctx->parent_root, sctx->right_path,
4441 sctx->cmp_key, name, name_len, &found_data,
4443 if (ret == -ENOENT) {
4444 ret = __process_new_xattr(num, di_key, name, name_len, data,
4445 data_len, type, ctx);
4446 } else if (ret >= 0) {
4447 if (data_len != found_data_len ||
4448 memcmp(data, found_data, data_len)) {
4449 ret = __process_new_xattr(num, di_key, name, name_len,
4450 data, data_len, type, ctx);
4460 static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
4461 const char *name, int name_len,
4462 const char *data, int data_len,
4466 struct send_ctx *sctx = ctx;
4468 ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
4469 name, name_len, NULL, NULL);
4471 ret = __process_deleted_xattr(num, di_key, name, name_len, data,
4472 data_len, type, ctx);
4479 static int process_changed_xattr(struct send_ctx *sctx)
4483 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4484 sctx->cmp_key, __process_changed_new_xattr, sctx);
4487 ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
4488 sctx->cmp_key, __process_changed_deleted_xattr, sctx);
4494 static int process_all_new_xattrs(struct send_ctx *sctx)
4497 struct btrfs_root *root;
4498 struct btrfs_path *path;
4499 struct btrfs_key key;
4500 struct btrfs_key found_key;
4501 struct extent_buffer *eb;
4504 path = alloc_path_for_send();
4508 root = sctx->send_root;
4510 key.objectid = sctx->cmp_key->objectid;
4511 key.type = BTRFS_XATTR_ITEM_KEY;
4513 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4518 eb = path->nodes[0];
4519 slot = path->slots[0];
4520 if (slot >= btrfs_header_nritems(eb)) {
4521 ret = btrfs_next_leaf(root, path);
4524 } else if (ret > 0) {
4531 btrfs_item_key_to_cpu(eb, &found_key, slot);
4532 if (found_key.objectid != key.objectid ||
4533 found_key.type != key.type) {
4538 ret = iterate_dir_item(root, path, &found_key,
4539 __process_new_xattr, sctx);
4547 btrfs_free_path(path);
4551 static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
4553 struct btrfs_root *root = sctx->send_root;
4554 struct btrfs_fs_info *fs_info = root->fs_info;
4555 struct inode *inode;
4558 struct btrfs_key key;
4559 pgoff_t index = offset >> PAGE_SHIFT;
4561 unsigned pg_offset = offset & ~PAGE_MASK;
4564 key.objectid = sctx->cur_ino;
4565 key.type = BTRFS_INODE_ITEM_KEY;
4568 inode = btrfs_iget(fs_info->sb, &key, root, NULL);
4570 return PTR_ERR(inode);
4572 if (offset + len > i_size_read(inode)) {
4573 if (offset > i_size_read(inode))
4576 len = i_size_read(inode) - offset; /* clamp the read length to i_size */
4581 last_index = (offset + len - 1) >> PAGE_SHIFT;
4583 /* initial readahead */
4584 memset(&sctx->ra, 0, sizeof(struct file_ra_state));
4585 file_ra_state_init(&sctx->ra, inode->i_mapping);
4586 btrfs_force_ra(inode->i_mapping, &sctx->ra, NULL, index,
4587 last_index - index + 1);
4589 while (index <= last_index) {
4590 unsigned cur_len = min_t(unsigned, len,
4591 PAGE_SIZE - pg_offset);
4592 page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
4598 if (!PageUptodate(page)) {
4599 btrfs_readpage(NULL, page);
4601 if (!PageUptodate(page)) {
4610 memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
4625 * Read some bytes from the current inode/file and send a write command to user space.
4628 static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
4632 ssize_t num_read = 0;
4634 p = fs_path_alloc();
4638 verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len);
4640 num_read = fill_read_buf(sctx, offset, len);
4641 if (num_read <= 0) {
4647 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
4651 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4655 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4656 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4657 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read);
4659 ret = send_cmd(sctx);
4670 * Send a clone command to user space.
4672 static int send_clone(struct send_ctx *sctx,
4673 u64 offset, u32 len,
4674 struct clone_root *clone_root)
4680 verbose_printk("btrfs: send_clone offset=%llu, len=%d, clone_root=%llu, "
4681 "clone_inode=%llu, clone_offset=%llu\n", offset, len,
4682 clone_root->root->objectid, clone_root->ino,
4683 clone_root->offset);
4685 p = fs_path_alloc();
4689 ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
4693 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4697 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4698 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
4699 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4701 if (clone_root->root == sctx->send_root) {
4702 ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
4703 &gen, NULL, NULL, NULL, NULL);
4706 ret = get_cur_path(sctx, clone_root->ino, gen, p);
4708 ret = get_inode_path(clone_root->root, clone_root->ino, p);
4714 * If the parent we're using has a received_uuid set then use that as
4715 * our clone source as that is what we will look for when doing a receive.
4718 * This covers the case that we create a snapshot off of a received
4719 * subvolume and then use that as the parent and try to receive on a different host.
4722 if (!btrfs_is_empty_uuid(clone_root->root->root_item.received_uuid))
4723 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
4724 clone_root->root->root_item.received_uuid);
4726 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
4727 clone_root->root->root_item.uuid);
4728 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
4729 le64_to_cpu(clone_root->root->root_item.ctransid));
4730 TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
4731 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
4732 clone_root->offset);
4734 ret = send_cmd(sctx);
4743 * Send an update extent command to user space.
4745 static int send_update_extent(struct send_ctx *sctx,
4746 u64 offset, u32 len)
4751 p = fs_path_alloc();
4755 ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
4759 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4763 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4764 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4765 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
4767 ret = send_cmd(sctx);
4775 static int send_hole(struct send_ctx *sctx, u64 end)
4777 struct fs_path *p = NULL;
4778 u64 offset = sctx->cur_inode_last_extent;
4782 p = fs_path_alloc();
4785 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4787 goto tlv_put_failure;
4788 memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE);
4789 while (offset < end) {
4790 len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE);
4792 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
4795 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4796 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4797 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len);
4798 ret = send_cmd(sctx);
4808 static int send_extent_data(struct send_ctx *sctx,
4814 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
4815 return send_update_extent(sctx, offset, len);
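 * Sketch of the chunking below (extent size hypothetical): with
 * BTRFS_SEND_READ_SIZE being 48K, a 100K extent is emitted as three
 * write commands of 48K, 48K and 4K at increasing file offsets.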
4817 while (sent < len) {
4818 u64 size = len - sent;
4821 if (size > BTRFS_SEND_READ_SIZE)
4822 size = BTRFS_SEND_READ_SIZE;
4823 ret = send_write(sctx, offset + sent, size);
4833 static int clone_range(struct send_ctx *sctx,
4834 struct clone_root *clone_root,
4835 const u64 disk_byte,
4840 struct btrfs_path *path;
4841 struct btrfs_key key;
4844 path = alloc_path_for_send();
4849 * We can't send a clone operation for the entire range if we find
4850 * extent items in the respective range in the source file that
4851 * refer to different extents or if we find holes.
4852 * So check for that and do a mix of clone and regular write/copy
4853 * operations if needed.
4857 * mkfs.btrfs -f /dev/sda
4858 * mount /dev/sda /mnt
4859 * xfs_io -f -c "pwrite -S 0xaa 0K 100K" /mnt/foo
4860 * cp --reflink=always /mnt/foo /mnt/bar
4861 * xfs_io -c "pwrite -S 0xbb 50K 50K" /mnt/foo
4862 * btrfs subvolume snapshot -r /mnt /mnt/snap
4864 * If, when we send the snapshot and are processing file bar (which
4865 * has a higher inode number than foo), we blindly send a clone operation
4866 * for the [0, 100K[ range from foo to bar, the receiver ends up getting
4867 * a file bar that matches the content of file foo - iow, it doesn't match
4868 * the content of bar in the original filesystem.
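 *
 * A sketch of the correct result for this example: clone only the still
 * shared [0, 50K[ range from foo and fall back to regular writes for the
 * [50K, 100K[ range, which is what the loop below produces by walking
 * the source file's extent items one by one.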
4870 key.objectid = clone_root->ino;
4871 key.type = BTRFS_EXTENT_DATA_KEY;
4872 key.offset = clone_root->offset;
4873 ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0);
4876 if (ret > 0 && path->slots[0] > 0) {
4877 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
4878 if (key.objectid == clone_root->ino &&
4879 key.type == BTRFS_EXTENT_DATA_KEY)
4884 struct extent_buffer *leaf = path->nodes[0];
4885 int slot = path->slots[0];
4886 struct btrfs_file_extent_item *ei;
4891 if (slot >= btrfs_header_nritems(leaf)) {
4892 ret = btrfs_next_leaf(clone_root->root, path);
4900 btrfs_item_key_to_cpu(leaf, &key, slot);
4903 * We might have an implicit trailing hole (NO_HOLES feature
4904 * enabled). We deal with it after leaving this loop.
4906 if (key.objectid != clone_root->ino ||
4907 key.type != BTRFS_EXTENT_DATA_KEY)
4910 ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
4911 type = btrfs_file_extent_type(leaf, ei);
4912 if (type == BTRFS_FILE_EXTENT_INLINE) {
4913 ext_len = btrfs_file_extent_inline_len(leaf, slot, ei);
4914 ext_len = PAGE_ALIGN(ext_len);
4916 ext_len = btrfs_file_extent_num_bytes(leaf, ei);
4919 if (key.offset + ext_len <= clone_root->offset)
4922 if (key.offset > clone_root->offset) {
4923 /* Implicit hole, NO_HOLES feature enabled. */
4924 u64 hole_len = key.offset - clone_root->offset;
4928 ret = send_extent_data(sctx, offset, hole_len);
4936 clone_root->offset += hole_len;
4937 data_offset += hole_len;
4940 if (key.offset >= clone_root->offset + len)
4943 clone_len = min_t(u64, ext_len, len);
4945 if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte &&
4946 btrfs_file_extent_offset(leaf, ei) == data_offset)
4947 ret = send_clone(sctx, offset, clone_len, clone_root);
4949 ret = send_extent_data(sctx, offset, clone_len);
4957 offset += clone_len;
4958 clone_root->offset += clone_len;
4959 data_offset += clone_len;
4965 ret = send_extent_data(sctx, offset, len);
4969 btrfs_free_path(path);
4973 static int send_write_or_clone(struct send_ctx *sctx,
4974 struct btrfs_path *path,
4975 struct btrfs_key *key,
4976 struct clone_root *clone_root)
4979 struct btrfs_file_extent_item *ei;
4980 u64 offset = key->offset;
4983 u64 bs = sctx->send_root->fs_info->sb->s_blocksize;
4985 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
4986 struct btrfs_file_extent_item);
4987 type = btrfs_file_extent_type(path->nodes[0], ei);
4988 if (type == BTRFS_FILE_EXTENT_INLINE) {
4989 len = btrfs_file_extent_inline_len(path->nodes[0],
4990 path->slots[0], ei);
4992 * It is possible the inline item won't cover the whole page,
4993 * but there may be items after this page. Make
4994 * sure to send the whole thing.
4996 len = PAGE_ALIGN(len);
4998 len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
5001 if (offset + len > sctx->cur_inode_size)
5002 len = sctx->cur_inode_size - offset;
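 * Cloning is only attempted below when offset + len is block aligned
 * (hypothetical numbers: with a 4K block size, a range ending at 100K
 * qualifies while one ending at 99K is sent as regular writes), since
 * clone operations work at block granularity on the receiving side.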
5008 if (clone_root && IS_ALIGNED(offset + len, bs)) {
5012 disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
5013 data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
5014 ret = clone_range(sctx, clone_root, disk_byte, data_offset,
5017 ret = send_extent_data(sctx, offset, len);
5023 static int is_extent_unchanged(struct send_ctx *sctx,
5024 struct btrfs_path *left_path,
5025 struct btrfs_key *ekey)
5028 struct btrfs_key key;
5029 struct btrfs_path *path = NULL;
5030 struct extent_buffer *eb;
5032 struct btrfs_key found_key;
5033 struct btrfs_file_extent_item *ei;
5038 u64 left_offset_fixed;
5046 path = alloc_path_for_send();
5050 eb = left_path->nodes[0];
5051 slot = left_path->slots[0];
5052 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
5053 left_type = btrfs_file_extent_type(eb, ei);
5055 if (left_type != BTRFS_FILE_EXTENT_REG) {
5059 left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
5060 left_len = btrfs_file_extent_num_bytes(eb, ei);
5061 left_offset = btrfs_file_extent_offset(eb, ei);
5062 left_gen = btrfs_file_extent_generation(eb, ei);
5065 * Following comments will refer to these graphics. L is the left
5066 * extents which we are checking at the moment. 1-8 are the right
5067 * extents that we iterate.
5070 * |-1-|-2a-|-3-|-4-|-5-|-6-|
5073 * |--1--|-2b-|...(same as above)
5075 * Alternative situation. Happens on files where extents got split.
5077 * |-----------7-----------|-6-|
5079 * Alternative situation. Happens on files which got larger.
5082 * Nothing follows after 8.
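 *
 * Worked sketch (hypothetical offsets): if L covers [64K, 128K[ and the
 * right side starts with extent 2a covering [48K, 112K[, the loop below
 * bumps right_offset by 16K to compensate for the earlier start, then
 * compares disknr, offset and generation, walking item by item until it
 * either passes the end of L (unchanged) or hits a mismatch (changed).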
5085 key.objectid = ekey->objectid;
5086 key.type = BTRFS_EXTENT_DATA_KEY;
5087 key.offset = ekey->offset;
5088 ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
5097 * Handle special case where the right side has no extents at all.
5099 eb = path->nodes[0];
5100 slot = path->slots[0];
5101 btrfs_item_key_to_cpu(eb, &found_key, slot);
5102 if (found_key.objectid != key.objectid ||
5103 found_key.type != key.type) {
5104 /* If we're a hole then just pretend nothing changed */
5105 ret = (left_disknr) ? 0 : 1;
5110 * We're now on 2a, 2b or 7.
5113 while (key.offset < ekey->offset + left_len) {
5114 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
5115 right_type = btrfs_file_extent_type(eb, ei);
5116 if (right_type != BTRFS_FILE_EXTENT_REG) {
5121 right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
5122 right_len = btrfs_file_extent_num_bytes(eb, ei);
5123 right_offset = btrfs_file_extent_offset(eb, ei);
5124 right_gen = btrfs_file_extent_generation(eb, ei);
5127 * Are we at extent 8? If yes, we know the extent is changed.
5128 * This may only happen on the first iteration.
5130 if (found_key.offset + right_len <= ekey->offset) {
5131 /* If we're a hole just pretend nothing changed */
5132 ret = (left_disknr) ? 0 : 1;
5136 left_offset_fixed = left_offset;
5137 if (key.offset < ekey->offset) {
5138 /* Fix the right offset for 2a and 7. */
5139 right_offset += ekey->offset - key.offset;
5141 /* Fix the left offset for all behind 2a and 2b */
5142 left_offset_fixed += key.offset - ekey->offset;
5146 * Check if we have the same extent.
5148 if (left_disknr != right_disknr ||
5149 left_offset_fixed != right_offset ||
5150 left_gen != right_gen) {
5156 * Go to the next extent.
5158 ret = btrfs_next_item(sctx->parent_root, path);
5162 eb = path->nodes[0];
5163 slot = path->slots[0];
5164 btrfs_item_key_to_cpu(eb, &found_key, slot);
5166 if (ret || found_key.objectid != key.objectid ||
5167 found_key.type != key.type) {
5168 key.offset += right_len;
5171 if (found_key.offset != key.offset + right_len) {
5179 * We're now behind the left extent (treat as unchanged) or at the end
5180 * of the right side (treat as changed).
5182 if (key.offset >= ekey->offset + left_len)
5189 btrfs_free_path(path);
5193 static int get_last_extent(struct send_ctx *sctx, u64 offset)
5195 struct btrfs_path *path;
5196 struct btrfs_root *root = sctx->send_root;
5197 struct btrfs_file_extent_item *fi;
5198 struct btrfs_key key;
5203 path = alloc_path_for_send();
5207 sctx->cur_inode_last_extent = 0;
5209 key.objectid = sctx->cur_ino;
5210 key.type = BTRFS_EXTENT_DATA_KEY;
5211 key.offset = offset;
5212 ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
5216 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
5217 if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
5220 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
5221 struct btrfs_file_extent_item);
5222 type = btrfs_file_extent_type(path->nodes[0], fi);
5223 if (type == BTRFS_FILE_EXTENT_INLINE) {
5224 u64 size = btrfs_file_extent_inline_len(path->nodes[0],
5225 path->slots[0], fi);
5226 extent_end = ALIGN(key.offset + size,
5227 sctx->send_root->sectorsize);
5229 extent_end = key.offset +
5230 btrfs_file_extent_num_bytes(path->nodes[0], fi);
5232 sctx->cur_inode_last_extent = extent_end;
5234 btrfs_free_path(path);
5238 static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
5239 struct btrfs_key *key)
5241 struct btrfs_file_extent_item *fi;
5246 if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
5249 if (sctx->cur_inode_last_extent == (u64)-1) {
5250 ret = get_last_extent(sctx, key->offset - 1);
5255 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
5256 struct btrfs_file_extent_item);
5257 type = btrfs_file_extent_type(path->nodes[0], fi);
5258 if (type == BTRFS_FILE_EXTENT_INLINE) {
5259 u64 size = btrfs_file_extent_inline_len(path->nodes[0],
5260 path->slots[0], fi);
5261 extent_end = ALIGN(key->offset + size,
5262 sctx->send_root->sectorsize);
5264 extent_end = key->offset +
5265 btrfs_file_extent_num_bytes(path->nodes[0], fi);
5268 if (path->slots[0] == 0 &&
5269 sctx->cur_inode_last_extent < key->offset) {
5271 * We might have skipped entire leafs that contained only
5272 * file extent items for our current inode. These leafs have
5273 * a generation number smaller (older) than the one in the
5274 * current leaf and the leaf our last extent came from, and
5275 * are located between these 2 leafs.
5277 ret = get_last_extent(sctx, key->offset - 1);
5282 if (sctx->cur_inode_last_extent < key->offset)
5283 ret = send_hole(sctx, key->offset);
5284 sctx->cur_inode_last_extent = extent_end;
5288 static int process_extent(struct send_ctx *sctx,
5289 struct btrfs_path *path,
5290 struct btrfs_key *key)
5292 struct clone_root *found_clone = NULL;
5295 if (S_ISLNK(sctx->cur_inode_mode))
5298 if (sctx->parent_root && !sctx->cur_inode_new) {
5299 ret = is_extent_unchanged(sctx, path, key);
5307 struct btrfs_file_extent_item *ei;
5310 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
5311 struct btrfs_file_extent_item);
5312 type = btrfs_file_extent_type(path->nodes[0], ei);
5313 if (type == BTRFS_FILE_EXTENT_PREALLOC ||
5314 type == BTRFS_FILE_EXTENT_REG) {
5316 * The send spec does not have a prealloc command yet,
5317 * so just leave a hole for prealloc'ed extents until
5318 * we have enough commands queued up to justify rev'ing the send spec.
5321 if (type == BTRFS_FILE_EXTENT_PREALLOC) {
5326 /* Have a hole, just skip it. */
5327 if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
5334 ret = find_extent_clone(sctx, path, key->objectid, key->offset,
5335 sctx->cur_inode_size, &found_clone);
5336 if (ret != -ENOENT && ret < 0)
5339 ret = send_write_or_clone(sctx, path, key, found_clone);
5343 ret = maybe_send_hole(sctx, path, key);
5348 static int process_all_extents(struct send_ctx *sctx)
5351 struct btrfs_root *root;
5352 struct btrfs_path *path;
5353 struct btrfs_key key;
5354 struct btrfs_key found_key;
5355 struct extent_buffer *eb;
5358 root = sctx->send_root;
5359 path = alloc_path_for_send();
5363 key.objectid = sctx->cmp_key->objectid;
5364 key.type = BTRFS_EXTENT_DATA_KEY;
5366 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5371 eb = path->nodes[0];
5372 slot = path->slots[0];
5374 if (slot >= btrfs_header_nritems(eb)) {
5375 ret = btrfs_next_leaf(root, path);
5378 } else if (ret > 0) {
5385 btrfs_item_key_to_cpu(eb, &found_key, slot);
5387 if (found_key.objectid != key.objectid ||
5388 found_key.type != key.type) {
5393 ret = process_extent(sctx, path, &found_key);
5401 btrfs_free_path(path);
5405 static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
5407 int *refs_processed)
5411 if (sctx->cur_ino == 0)
5413 if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
5414 sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
5416 if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
5419 ret = process_recorded_refs(sctx, pending_move);
5423 *refs_processed = 1;
5428 static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
5439 int pending_move = 0;
5440 int refs_processed = 0;
5442 ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
5448 * We have processed the refs and thus need to advance send_progress.
5449 * Now, calls to get_cur_xxx will take the updated refs of the current
5450 * inode into account.
5452 * On the other hand, if our current inode is a directory and couldn't
5453 * be moved/renamed because its parent was renamed/moved too and it has
5454 * a higher inode number, we can only move/rename our current inode
5455 * after we moved/renamed its parent. Therefore in this case operate on
5456 * the old path (pre move/rename) of our current inode, and the
5457 * move/rename will be performed later.
5459 if (refs_processed && !pending_move)
5460 sctx->send_progress = sctx->cur_ino + 1;
5462 if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
5464 if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
5467 ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
5468 &left_mode, &left_uid, &left_gid, NULL);
5472 if (!sctx->parent_root || sctx->cur_inode_new) {
5474 if (!S_ISLNK(sctx->cur_inode_mode))
5477 ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
5478 NULL, NULL, &right_mode, &right_uid,
5483 if (left_uid != right_uid || left_gid != right_gid)
5485 if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
5489 if (S_ISREG(sctx->cur_inode_mode)) {
5490 if (need_send_hole(sctx)) {
5491 if (sctx->cur_inode_last_extent == (u64)-1 ||
5492 sctx->cur_inode_last_extent <
5493 sctx->cur_inode_size) {
5494 ret = get_last_extent(sctx, (u64)-1);
5498 if (sctx->cur_inode_last_extent <
5499 sctx->cur_inode_size) {
5500 ret = send_hole(sctx, sctx->cur_inode_size);
5505 ret = send_truncate(sctx, sctx->cur_ino, sctx->cur_inode_gen,
5506 sctx->cur_inode_size);
5512 ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
5513 left_uid, left_gid);
5518 ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
5525 * If other directory inodes depended on our current directory
5526 * inode's move/rename, now do their move/rename operations.
5528 if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
5529 ret = apply_children_dir_moves(sctx);
5533 * Need to send the utimes every time, no matter if anything actually
5534 * changed between the two trees, as we have made changes to
5535 * the inode before. If our inode is a directory and it's
5536 * waiting to be moved/renamed, we will send its utimes when
5537 * it's moved/renamed, therefore we don't need to do it here.
5539 sctx->send_progress = sctx->cur_ino + 1;
5540 ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
5549 static int changed_inode(struct send_ctx *sctx,
5550 enum btrfs_compare_tree_result result)
5553 struct btrfs_key *key = sctx->cmp_key;
5554 struct btrfs_inode_item *left_ii = NULL;
5555 struct btrfs_inode_item *right_ii = NULL;
5559 sctx->cur_ino = key->objectid;
5560 sctx->cur_inode_new_gen = 0;
5561 sctx->cur_inode_last_extent = (u64)-1;
5564 * Set send_progress to current inode. This will tell all get_cur_xxx
5565 * functions that the current inode's refs are not updated yet. Later,
5566 * when process_recorded_refs is finished, it is set to cur_ino + 1.
5568 sctx->send_progress = sctx->cur_ino;
5570 if (result == BTRFS_COMPARE_TREE_NEW ||
5571 result == BTRFS_COMPARE_TREE_CHANGED) {
5572 left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
5573 sctx->left_path->slots[0],
5574 struct btrfs_inode_item);
5575 left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
5578 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
5579 sctx->right_path->slots[0],
5580 struct btrfs_inode_item);
5581 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
5584 if (result == BTRFS_COMPARE_TREE_CHANGED) {
5585 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
5586 sctx->right_path->slots[0],
5587 struct btrfs_inode_item);
5589 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
5593 * The cur_ino = root dir case is special here. We can't treat
5594 * the inode as deleted+reused because it would generate a
5595 * stream that tries to delete/mkdir the root dir.
5597 if (left_gen != right_gen &&
5598 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
5599 sctx->cur_inode_new_gen = 1;
5602 if (result == BTRFS_COMPARE_TREE_NEW) {
5603 sctx->cur_inode_gen = left_gen;
5604 sctx->cur_inode_new = 1;
5605 sctx->cur_inode_deleted = 0;
5606 sctx->cur_inode_size = btrfs_inode_size(
5607 sctx->left_path->nodes[0], left_ii);
5608 sctx->cur_inode_mode = btrfs_inode_mode(
5609 sctx->left_path->nodes[0], left_ii);
5610 sctx->cur_inode_rdev = btrfs_inode_rdev(
5611 sctx->left_path->nodes[0], left_ii);
5612 if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
5613 ret = send_create_inode_if_needed(sctx);
5614 } else if (result == BTRFS_COMPARE_TREE_DELETED) {
5615 sctx->cur_inode_gen = right_gen;
5616 sctx->cur_inode_new = 0;
5617 sctx->cur_inode_deleted = 1;
5618 sctx->cur_inode_size = btrfs_inode_size(
5619 sctx->right_path->nodes[0], right_ii);
5620 sctx->cur_inode_mode = btrfs_inode_mode(
5621 sctx->right_path->nodes[0], right_ii);
5622 } else if (result == BTRFS_COMPARE_TREE_CHANGED) {
5624 * We need to do some special handling in case the inode was
5625 * reported as changed with a changed generation number. This
5626 * means that the original inode was deleted and a new inode
5627 * reused the same inum. So we have to treat the old inode as
5628 * deleted and the new one as new.
5630 if (sctx->cur_inode_new_gen) {
5632 * First, process the inode as if it was deleted.
5634 sctx->cur_inode_gen = right_gen;
5635 sctx->cur_inode_new = 0;
5636 sctx->cur_inode_deleted = 1;
5637 sctx->cur_inode_size = btrfs_inode_size(
5638 sctx->right_path->nodes[0], right_ii);
5639 sctx->cur_inode_mode = btrfs_inode_mode(
5640 sctx->right_path->nodes[0], right_ii);
5641 ret = process_all_refs(sctx,
5642 BTRFS_COMPARE_TREE_DELETED);
5647 * Now process the inode as if it was new.
5649 sctx->cur_inode_gen = left_gen;
5650 sctx->cur_inode_new = 1;
5651 sctx->cur_inode_deleted = 0;
5652 sctx->cur_inode_size = btrfs_inode_size(
5653 sctx->left_path->nodes[0], left_ii);
5654 sctx->cur_inode_mode = btrfs_inode_mode(
5655 sctx->left_path->nodes[0], left_ii);
5656 sctx->cur_inode_rdev = btrfs_inode_rdev(
5657 sctx->left_path->nodes[0], left_ii);
5658 ret = send_create_inode_if_needed(sctx);
5662 ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
5666 * Advance send_progress now as we did not get into
5667 * process_recorded_refs_if_needed in the new_gen case.
5669 sctx->send_progress = sctx->cur_ino + 1;
5672 * Now process all extents and xattrs of the inode as if
5673 * they were all new.
5675 ret = process_all_extents(sctx);
5678 ret = process_all_new_xattrs(sctx);
5682 sctx->cur_inode_gen = left_gen;
5683 sctx->cur_inode_new = 0;
5684 sctx->cur_inode_new_gen = 0;
5685 sctx->cur_inode_deleted = 0;
5686 sctx->cur_inode_size = btrfs_inode_size(
5687 sctx->left_path->nodes[0], left_ii);
5688 sctx->cur_inode_mode = btrfs_inode_mode(
5689 sctx->left_path->nodes[0], left_ii);
5698 * We have to process new refs before deleted refs, but compare_trees gives us
5699 * the new and deleted refs mixed. To fix this, we record the new/deleted refs
5700 * first and later process them in process_recorded_refs.
5701 * For the cur_inode_new_gen case, we skip recording completely because
5702 * changed_inode already initiated the processing of refs. The reason for this is
5703 * that in this case, compare_tree actually compares the refs of 2 different
5704 * inodes. To fix this, process_all_refs is used in changed_inode to handle all
5705 * refs of the right tree as deleted and all refs of the left tree as new.
5707 static int changed_ref(struct send_ctx *sctx,
5708 enum btrfs_compare_tree_result result)
5712 BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
5714 if (!sctx->cur_inode_new_gen &&
5715 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
5716 if (result == BTRFS_COMPARE_TREE_NEW)
5717 ret = record_new_ref(sctx);
5718 else if (result == BTRFS_COMPARE_TREE_DELETED)
5719 ret = record_deleted_ref(sctx);
5720 else if (result == BTRFS_COMPARE_TREE_CHANGED)
5721 ret = record_changed_ref(sctx);
5728 * Process new/deleted/changed xattrs. We skip processing in the
5729 * cur_inode_new_gen case because changed_inode already initiated the processing
5730 * of xattrs. The reason is the same as in changed_ref.
5732 static int changed_xattr(struct send_ctx *sctx,
5733 enum btrfs_compare_tree_result result)
5737 BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
5739 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
5740 if (result == BTRFS_COMPARE_TREE_NEW)
5741 ret = process_new_xattr(sctx);
5742 else if (result == BTRFS_COMPARE_TREE_DELETED)
5743 ret = process_deleted_xattr(sctx);
5744 else if (result == BTRFS_COMPARE_TREE_CHANGED)
5745 ret = process_changed_xattr(sctx);
5752 * Process new/deleted/changed extents. We skip processing in the
5753 * cur_inode_new_gen case because changed_inode already initiated the processing
5754 * of extents. The reason is the same as in changed_ref.
5756 static int changed_extent(struct send_ctx *sctx,
5757 enum btrfs_compare_tree_result result)
5761 BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
5763 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
5764 if (result != BTRFS_COMPARE_TREE_DELETED)
5765 ret = process_extent(sctx, sctx->left_path,
5772 static int dir_changed(struct send_ctx *sctx, u64 dir)
5773 {
5774 u64 orig_gen, new_gen;
5775 int ret;
5777 ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
5778 NULL, NULL);
5782 ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
5783 NULL, NULL, NULL);
5787 return (orig_gen != new_gen) ? 1 : 0;
5788 }
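/*
 * Descriptive comment added for clarity: called for ref items that
 * compare_trees reported as identical. Even then, a parent directory
 * listed in the ref may have been deleted and recreated (its generation
 * changed), in which case the ref still has to be re-processed. Returns
 * nonzero as soon as one referenced parent directory changed.
 */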
5790 static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
5791 struct btrfs_key *key)
5793 struct btrfs_inode_extref *extref;
5794 struct extent_buffer *leaf;
5795 u64 dirid = 0, last_dirid = 0;
5796 unsigned long ptr;
5797 u32 item_size;
5798 u32 cur_offset = 0;
5799 int ref_name_len;
5800 int ret = 0;
5802 /* Easy case, just check this one dirid */
5803 if (key->type == BTRFS_INODE_REF_KEY) {
5804 dirid = key->offset;
5806 ret = dir_changed(sctx, dirid);
5807 goto out;
5808 }
5810 leaf = path->nodes[0];
5811 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
5812 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
5813 while (cur_offset < item_size) {
5814 extref = (struct btrfs_inode_extref *)(ptr +
5815 cur_offset);
5816 dirid = btrfs_inode_extref_parent(leaf, extref);
5817 ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
5818 cur_offset += ref_name_len + sizeof(*extref);
5819 if (dirid == last_dirid)
5820 continue;
5821 ret = dir_changed(sctx, dirid);
5822 if (ret)
5823 break;
5824 last_dirid = dirid;
5825 }
5826 out:
5827 return ret;
5828 }
5830 /*
5831 * Updates compare-related fields in sctx and simply forwards to the actual
5832 * changed_xxx functions.
5833 */
5834 static int changed_cb(struct btrfs_root *left_root,
5835 struct btrfs_root *right_root,
5836 struct btrfs_path *left_path,
5837 struct btrfs_path *right_path,
5838 struct btrfs_key *key,
5839 enum btrfs_compare_tree_result result,
5840 void *ctx)
5841 {
5842 int ret = 0;
5843 struct send_ctx *sctx = ctx;
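/*
 * Descriptive comment added for clarity: items that compare_trees reports
 * as identical may still need work. An unchanged ref can point into a
 * directory that was deleted and recreated with the same name, and an
 * unchanged file extent item can still imply a hole that must be sent.
 * Those cases are re-classified as changed and handled by the normal
 * paths below.
 */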
5845 if (result == BTRFS_COMPARE_TREE_SAME) {
5846 if (key->type == BTRFS_INODE_REF_KEY ||
5847 key->type == BTRFS_INODE_EXTREF_KEY) {
5848 ret = compare_refs(sctx, left_path, key);
5849 if (!ret)
5850 return 0;
5851 if (ret < 0)
5852 return ret;
5853 } else if (key->type == BTRFS_EXTENT_DATA_KEY) {
5854 return maybe_send_hole(sctx, left_path, key);
5855 } else {
5856 return 0;
5857 }
5858 result = BTRFS_COMPARE_TREE_CHANGED;
5859 ret = 0;
5860 }
5862 sctx->left_path = left_path;
5863 sctx->right_path = right_path;
5864 sctx->cmp_key = key;
5866 ret = finish_inode_if_needed(sctx, 0);
5870 /* Ignore non-FS objects */
5871 if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
5872 key->objectid == BTRFS_FREE_SPACE_OBJECTID)
5873 goto out;
5875 if (key->type == BTRFS_INODE_ITEM_KEY)
5876 ret = changed_inode(sctx, result);
5877 else if (key->type == BTRFS_INODE_REF_KEY ||
5878 key->type == BTRFS_INODE_EXTREF_KEY)
5879 ret = changed_ref(sctx, result);
5880 else if (key->type == BTRFS_XATTR_ITEM_KEY)
5881 ret = changed_xattr(sctx, result);
5882 else if (key->type == BTRFS_EXTENT_DATA_KEY)
5883 ret = changed_extent(sctx, result);
5885 out:
5886 return ret;
5887 }
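/*
 * Descriptive comment added for clarity: full send walks every item of
 * the send root, starting at the first regular inode, and feeds each one
 * to changed_cb() as BTRFS_COMPARE_TREE_NEW, as if the whole subvolume
 * had just been created.
 */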
5889 static int full_send_tree(struct send_ctx *sctx)
5890 {
5891 int ret;
5892 struct btrfs_root *send_root = sctx->send_root;
5893 struct btrfs_key key;
5894 struct btrfs_key found_key;
5895 struct btrfs_path *path;
5896 struct extent_buffer *eb;
5897 int slot;
5899 path = alloc_path_for_send();
5900 if (!path)
5901 return -ENOMEM;
5903 key.objectid = BTRFS_FIRST_FREE_OBJECTID;
5904 key.type = BTRFS_INODE_ITEM_KEY;
5905 key.offset = 0;
5907 ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
5908 if (ret < 0)
5909 goto out;
5910 if (ret)
5911 goto out_finish;
5913 while (1) {
5914 eb = path->nodes[0];
5915 slot = path->slots[0];
5916 btrfs_item_key_to_cpu(eb, &found_key, slot);
5918 ret = changed_cb(send_root, NULL, path, NULL,
5919 &found_key, BTRFS_COMPARE_TREE_NEW, sctx);
5920 if (ret < 0)
5921 goto out;
5923 key.objectid = found_key.objectid;
5924 key.type = found_key.type;
5925 key.offset = found_key.offset + 1;
5927 ret = btrfs_next_item(send_root, path);
5928 if (ret < 0)
5929 goto out;
5930 if (ret) {
5931 ret = 0;
5932 break;
5933 }
5934 }
5936 out_finish:
5937 ret = finish_inode_if_needed(sctx, 1);
5939 out:
5940 btrfs_free_path(path);
5941 return ret;
5942 }
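/*
 * Descriptive comment added for clarity: top-level worker that streams a
 * subvolume. It emits the stream header and the subvol/snapshot begin
 * command, then either diffs against the parent root via
 * btrfs_compare_trees() (incremental send) or walks the whole tree with
 * full_send_tree().
 */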
5944 static int send_subvol(struct send_ctx *sctx)
5945 {
5946 int ret;
5948 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
5949 ret = send_header(sctx);
5954 ret = send_subvol_begin(sctx);
5958 if (sctx->parent_root) {
5959 ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root,
5960 changed_cb, sctx);
5963 ret = finish_inode_if_needed(sctx, 1);
5966 } else {
5967 ret = full_send_tree(sctx);
5968 if (ret < 0)
5969 goto out;
5970 }
5972 out:
5973 free_recorded_refs(sctx);
5974 return ret;
5975 }
5977 /*
5978 * If orphan cleanup removed any orphans from a root, it means the tree
5979 * was modified and therefore the commit root is not the same as the current
5980 * root anymore. This is a problem, because send uses the commit root and
5981 * therefore can see inode items that don't exist in the current root anymore,
5982 * and, for example, make calls to btrfs_iget, which will do tree lookups based
5983 * on the current root and not on the commit root. Those lookups will fail,
5984 * returning a -ESTALE error, and making send fail with that error. So make
5985 * sure a send does not see any orphans we have just removed, and that it will
5986 * see the same inodes regardless of whether a transaction commit happened
5987 * before it started (meaning that the commit root will be the same as the
5988 * current root) or not.
5989 */
5990 static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
5991 {
5992 int i;
5993 struct btrfs_trans_handle *trans = NULL;
5995 again:
5996 if (sctx->parent_root &&
5997 sctx->parent_root->node != sctx->parent_root->commit_root)
5998 goto commit_trans;
6000 for (i = 0; i < sctx->clone_roots_cnt; i++)
6001 if (sctx->clone_roots[i].root->node !=
6002 sctx->clone_roots[i].root->commit_root)
6003 goto commit_trans;
6005 if (trans)
6006 return btrfs_end_transaction(trans, sctx->send_root);
6008 return 0;
6010 commit_trans:
6011 /* Use any root, all fs roots will get their commit roots updated. */
6012 if (!trans) {
6013 trans = btrfs_join_transaction(sctx->send_root);
6014 if (IS_ERR(trans))
6015 return PTR_ERR(trans);
6016 goto again;
6017 }
6019 return btrfs_commit_transaction(trans, sctx->send_root);
6020 }
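/*
 * Descriptive comment added for clarity: drops one send_in_progress
 * reference on @root, taken when the send was started, and warns if the
 * counter ever goes negative.
 */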
6022 static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
6024 spin_lock(&root->root_item_lock);
6025 root->send_in_progress--;
6026 /*
6027 * Not much left to do, we don't know why it's unbalanced and
6028 * can't blindly reset it to 0.
6029 */
6030 if (root->send_in_progress < 0)
6031 btrfs_err(root->fs_info,
6032 "send_in_progres unbalanced %d root %llu",
6033 root->send_in_progress, root->root_key.objectid);
6034 spin_unlock(&root->root_item_lock);
6035 }
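/*
 * Descriptive comment added for clarity: entry point of the send ioctl.
 * It validates the user supplied arguments, pins the send root, the
 * optional parent root and all clone sources via send_in_progress, and
 * then streams the subvolume (or its differences against the parent) to
 * the file descriptor supplied by userspace.
 *
 * A rough userspace sketch, not part of this file; pipe_fd, subvol_fd and
 * parent_id are placeholders, the struct fields come from the uapi
 * header:
 *
 *	struct btrfs_ioctl_send_args args = { 0 };
 *	args.send_fd = pipe_fd;        // the stream is written here
 *	args.parent_root = parent_id;  // 0 means full send
 *	ioctl(subvol_fd, BTRFS_IOC_SEND, &args);
 */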
6037 long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
6038 {
6039 int ret = 0;
6040 struct btrfs_root *send_root;
6041 struct btrfs_root *clone_root;
6042 struct btrfs_fs_info *fs_info;
6043 struct btrfs_ioctl_send_args *arg = NULL;
6044 struct btrfs_key key;
6045 struct send_ctx *sctx = NULL;
6046 u32 i;
6047 u64 *clone_sources_tmp = NULL;
6048 int clone_sources_to_rollback = 0;
6049 unsigned alloc_size;
6050 int sort_clone_roots = 0;
6051 int index;
6053 if (!capable(CAP_SYS_ADMIN))
6054 return -EPERM;
6056 send_root = BTRFS_I(file_inode(mnt_file))->root;
6057 fs_info = send_root->fs_info;
6059 /*
6060 * The subvolume must remain read-only during send; protect against
6061 * making it RW. This also protects against deletion.
6062 */
6063 spin_lock(&send_root->root_item_lock);
6064 send_root->send_in_progress++;
6065 spin_unlock(&send_root->root_item_lock);
6067 /*
6068 * Orphan cleanup is done when we look up the root; it should already be
6069 * complete by the time we get here.
6070 */
6071 WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE);
6073 /*
6074 * Userspace tools do the checks and warn the user if it's
6075 * not RO.
6076 */
6077 if (!btrfs_root_readonly(send_root)) {
6078 ret = -EPERM;
6079 goto out;
6080 }
6082 arg = memdup_user(arg_, sizeof(*arg));
6089 if (arg->clone_sources_count >
6090 ULLONG_MAX / sizeof(*arg->clone_sources)) {
6091 ret = -EINVAL;
6092 goto out;
6093 }
6095 if (!access_ok(VERIFY_READ, arg->clone_sources,
6096 sizeof(*arg->clone_sources) *
6097 arg->clone_sources_count)) {
6098 ret = -EFAULT;
6099 goto out;
6100 }
6102 if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
6103 ret = -EINVAL;
6104 goto out;
6105 }
6107 sctx = kzalloc(sizeof(struct send_ctx), GFP_KERNEL);
6113 INIT_LIST_HEAD(&sctx->new_refs);
6114 INIT_LIST_HEAD(&sctx->deleted_refs);
6115 INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
6116 INIT_LIST_HEAD(&sctx->name_cache_list);
6118 sctx->flags = arg->flags;
6120 sctx->send_filp = fget(arg->send_fd);
6121 if (!sctx->send_filp) {
6122 ret = -EBADF;
6123 goto out;
6124 }
6126 sctx->send_root = send_root;
6127 /*
6128 * Unlikely but possible: if the subvolume is marked for deletion but
6129 * its directory entry is slow to be removed, send can still be started.
6130 */
6131 if (btrfs_root_dead(sctx->send_root)) {
6132 ret = -EPERM;
6133 goto out;
6134 }
6136 sctx->clone_roots_cnt = arg->clone_sources_count;
6138 sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
6139 sctx->send_buf = kmalloc(sctx->send_max_size, GFP_KERNEL | __GFP_NOWARN);
6140 if (!sctx->send_buf) {
6141 sctx->send_buf = vmalloc(sctx->send_max_size);
6142 if (!sctx->send_buf) {
6143 ret = -ENOMEM;
6144 goto out;
6145 }
6146 }
6148 sctx->read_buf = kmalloc(BTRFS_SEND_READ_SIZE, GFP_KERNEL | __GFP_NOWARN);
6149 if (!sctx->read_buf) {
6150 sctx->read_buf = vmalloc(BTRFS_SEND_READ_SIZE);
6151 if (!sctx->read_buf) {
6152 ret = -ENOMEM;
6153 goto out;
6154 }
6155 }
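/*
 * Descriptive comment added for clarity: rbtrees used by the incremental
 * send logic. pending_dir_moves and waiting_dir_moves track directories
 * whose rename/move must be delayed until other inodes have been
 * processed (see the comment at the top of this file on processing
 * order), and orphan_dirs tracks deleted directories whose rmdir has to
 * wait until their last items have gone away.
 */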
6157 sctx->pending_dir_moves = RB_ROOT;
6158 sctx->waiting_dir_moves = RB_ROOT;
6159 sctx->orphan_dirs = RB_ROOT;
6161 alloc_size = sizeof(struct clone_root) * (arg->clone_sources_count + 1);
6163 sctx->clone_roots = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN);
6164 if (!sctx->clone_roots) {
6165 sctx->clone_roots = vzalloc(alloc_size);
6166 if (!sctx->clone_roots) {
6167 ret = -ENOMEM;
6168 goto out;
6169 }
6170 }
6172 alloc_size = arg->clone_sources_count * sizeof(*arg->clone_sources);
6174 if (arg->clone_sources_count) {
6175 clone_sources_tmp = kmalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN);
6176 if (!clone_sources_tmp) {
6177 clone_sources_tmp = vmalloc(alloc_size);
6178 if (!clone_sources_tmp) {
6179 ret = -ENOMEM;
6180 goto out;
6181 }
6182 }
6184 ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
6185 alloc_size);
6186 if (ret) {
6187 ret = -EFAULT;
6188 goto out;
6189 }
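/*
 * Descriptive comment added for clarity: look up every clone source by
 * subvolume id and pin it with a send_in_progress reference, so it can
 * neither be made read-write nor deleted while this send is running.
 */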
6191 for (i = 0; i < arg->clone_sources_count; i++) {
6192 key.objectid = clone_sources_tmp[i];
6193 key.type = BTRFS_ROOT_ITEM_KEY;
6194 key.offset = (u64)-1;
6196 index = srcu_read_lock(&fs_info->subvol_srcu);
6198 clone_root = btrfs_read_fs_root_no_name(fs_info, &key);
6199 if (IS_ERR(clone_root)) {
6200 srcu_read_unlock(&fs_info->subvol_srcu, index);
6201 ret = PTR_ERR(clone_root);
6202 goto out;
6203 }
6204 spin_lock(&clone_root->root_item_lock);
6205 if (!btrfs_root_readonly(clone_root) ||
6206 btrfs_root_dead(clone_root)) {
6207 spin_unlock(&clone_root->root_item_lock);
6208 srcu_read_unlock(&fs_info->subvol_srcu, index);
6209 ret = -EPERM;
6210 goto out;
6211 }
6212 clone_root->send_in_progress++;
6213 spin_unlock(&clone_root->root_item_lock);
6214 srcu_read_unlock(&fs_info->subvol_srcu, index);
6216 sctx->clone_roots[i].root = clone_root;
6217 clone_sources_to_rollback = i + 1;
6218 }
6219 kvfree(clone_sources_tmp);
6220 clone_sources_tmp = NULL;
6221 }
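/*
 * Descriptive comment added for clarity: the parent root of an
 * incremental send gets the same treatment as the clone sources: it is
 * looked up, pinned, and must be read-only and not scheduled for
 * deletion.
 */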
6223 if (arg->parent_root) {
6224 key.objectid = arg->parent_root;
6225 key.type = BTRFS_ROOT_ITEM_KEY;
6226 key.offset = (u64)-1;
6228 index = srcu_read_lock(&fs_info->subvol_srcu);
6230 sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key);
6231 if (IS_ERR(sctx->parent_root)) {
6232 srcu_read_unlock(&fs_info->subvol_srcu, index);
6233 ret = PTR_ERR(sctx->parent_root);
6234 goto out;
6235 }
6237 spin_lock(&sctx->parent_root->root_item_lock);
6238 sctx->parent_root->send_in_progress++;
6239 if (!btrfs_root_readonly(sctx->parent_root) ||
6240 btrfs_root_dead(sctx->parent_root)) {
6241 spin_unlock(&sctx->parent_root->root_item_lock);
6242 srcu_read_unlock(&fs_info->subvol_srcu, index);
6243 ret = -EPERM;
6244 goto out;
6245 }
6246 spin_unlock(&sctx->parent_root->root_item_lock);
6248 srcu_read_unlock(&fs_info->subvol_srcu, index);
6249 }
6251 /*
6252 * Clones from send_root are allowed, but only if the clone source
6253 * is behind the current send position. This is checked while searching
6254 * for possible clone sources.
6255 */
6256 sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root;
6258 /* We do a bsearch later */
6259 sort(sctx->clone_roots, sctx->clone_roots_cnt,
6260 sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
6261 NULL);
6262 sort_clone_roots = 1;
6264 ret = ensure_commit_roots_uptodate(sctx);
6265 if (ret)
6266 goto out;
6268 current->journal_info = BTRFS_SEND_TRANS_STUB;
6269 ret = send_subvol(sctx);
6270 current->journal_info = NULL;
6271 if (ret < 0)
6272 goto out;
6274 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
6275 ret = begin_cmd(sctx, BTRFS_SEND_C_END);
6276 if (ret < 0)
6277 goto out;
6278 ret = send_cmd(sctx);
6279 if (ret < 0)
6280 goto out;
6281 }
6283 out:
6284 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
6285 while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
6286 struct rb_node *n;
6287 struct pending_dir_move *pm;
6289 n = rb_first(&sctx->pending_dir_moves);
6290 pm = rb_entry(n, struct pending_dir_move, node);
6291 while (!list_empty(&pm->list)) {
6292 struct pending_dir_move *pm2;
6294 pm2 = list_first_entry(&pm->list,
6295 struct pending_dir_move, list);
6296 free_pending_move(sctx, pm2);
6297 }
6298 free_pending_move(sctx, pm);
6299 }
6301 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
6302 while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
6303 struct rb_node *n;
6304 struct waiting_dir_move *dm;
6306 n = rb_first(&sctx->waiting_dir_moves);
6307 dm = rb_entry(n, struct waiting_dir_move, node);
6308 rb_erase(&dm->node, &sctx->waiting_dir_moves);
6309 kfree(dm);
6310 }
6312 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
6313 while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
6314 struct rb_node *n;
6315 struct orphan_dir_info *odi;
6317 n = rb_first(&sctx->orphan_dirs);
6318 odi = rb_entry(n, struct orphan_dir_info, node);
6319 free_orphan_dir_info(sctx, odi);
6320 }
6322 if (sort_clone_roots) {
6323 for (i = 0; i < sctx->clone_roots_cnt; i++)
6324 btrfs_root_dec_send_in_progress(
6325 sctx->clone_roots[i].root);
6326 } else {
6327 for (i = 0; sctx && i < clone_sources_to_rollback; i++)
6328 btrfs_root_dec_send_in_progress(
6329 sctx->clone_roots[i].root);
6331 btrfs_root_dec_send_in_progress(send_root);
6332 }
6333 if (sctx && !IS_ERR_OR_NULL(sctx->parent_root))
6334 btrfs_root_dec_send_in_progress(sctx->parent_root);
6336 kfree(arg);
6337 kvfree(clone_sources_tmp);
6339 if (sctx) {
6340 if (sctx->send_filp)
6341 fput(sctx->send_filp);
6343 kvfree(sctx->clone_roots);
6344 kvfree(sctx->send_buf);
6345 kvfree(sctx->read_buf);
6347 name_cache_free(sctx);
6349 kfree(sctx);
6350 }
6352 return ret;
6353 }