 * Copyright (C) 2012 Alexander Block. All rights reserved.
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
#include <linux/bsearch.h>
#include <linux/file.h>
#include <linux/sort.h>
#include <linux/mount.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/radix-tree.h>
#include <linux/crc32c.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include "btrfs_inode.h"
#include "transaction.h"
static int g_verbose = 0;
#define verbose_printk(...) do { if (g_verbose) printk(__VA_ARGS__); } while (0)
 * A fs_path is a helper to dynamically build path names with unknown size.
 * It reallocates the internal buffer on demand.
 * It allows fast adding of path elements on the right side (normal path) and
 * fast adding to the left side (reversed path). A reversed path can also be
 * unreversed if needed (see the usage sketch after fs_path_unreverse() below).
	unsigned int reversed:1;
	unsigned int virtual_mem:1;
#define FS_PATH_INLINE_SIZE \
	(sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
	/* reused for each extent */
	struct btrfs_root *root;
#define SEND_CTX_MAX_NAME_CACHE_SIZE 128
#define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)
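/*
 * Note (editorial, not from the original file): the two limits above give
 * the name cache its hysteresis. name_cache_clean_unused() below only
 * starts evicting once the cache has grown past
 * SEND_CTX_NAME_CACHE_CLEAN_SIZE entries and then trims it back down to
 * SEND_CTX_MAX_NAME_CACHE_SIZE.
 */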
	struct file *send_filp;
	u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
	u64 flags;	/* 'flags' member of btrfs_ioctl_send_args is u64 */
	struct btrfs_root *send_root;
	struct btrfs_root *parent_root;
	struct clone_root *clone_roots;
	/* current state of the compare_tree call */
	struct btrfs_path *left_path;
	struct btrfs_path *right_path;
	struct btrfs_key *cmp_key;
	 * info about the currently processed inode. In case of deleted inodes,
	 * these are the values from the deleted inode.
	int cur_inode_new_gen;
	int cur_inode_deleted;
	u64 cur_inode_last_extent;
	struct list_head new_refs;
	struct list_head deleted_refs;
	struct radix_tree_root name_cache;
	struct list_head name_cache_list;
struct name_cache_entry {
	struct list_head list;
	 * radix_tree has only 32bit entries but we need to handle 64bit inums.
	 * We use the lower 32bit of the 64bit inum to store it in the tree. If
	 * more than one inum would fall into the same entry, we use radix_list
	 * to store the additional entries. radix_list is also used to store
	 * entries where two entries have the same inum but different
	struct list_head radix_list;
	int need_later_update;
static int need_send_hole(struct send_ctx *sctx)
	return (sctx->parent_root && !sctx->cur_inode_new &&
		!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
		S_ISREG(sctx->cur_inode_mode));
static void fs_path_reset(struct fs_path *p)
		p->start = p->buf + p->buf_len - 1;
static struct fs_path *fs_path_alloc(void)
	p = kmalloc(sizeof(*p), GFP_NOFS);
	p->buf = p->inline_buf;
	p->buf_len = FS_PATH_INLINE_SIZE;
static struct fs_path *fs_path_alloc_reversed(void)
static void fs_path_free(struct fs_path *p)
	if (p->buf != p->inline_buf) {
static int fs_path_len(struct fs_path *p)
	return p->end - p->start;
static int fs_path_ensure_buf(struct fs_path *p, int len)
	if (p->buf_len >= len)
	path_len = p->end - p->start;
	old_buf_len = p->buf_len;
	len = PAGE_ALIGN(len);
	if (p->buf == p->inline_buf) {
		tmp_buf = kmalloc(len, GFP_NOFS | __GFP_NOWARN);
			tmp_buf = vmalloc(len);
		memcpy(tmp_buf, p->buf, p->buf_len);
		if (p->virtual_mem) {
			tmp_buf = vmalloc(len);
			memcpy(tmp_buf, p->buf, p->buf_len);
			tmp_buf = krealloc(p->buf, len, GFP_NOFS);
				tmp_buf = vmalloc(len);
				memcpy(tmp_buf, p->buf, p->buf_len);
		tmp_buf = p->buf + old_buf_len - path_len - 1;
		p->end = p->buf + p->buf_len - 1;
		p->start = p->end - path_len;
		memmove(p->start, tmp_buf, path_len + 1);
		p->end = p->start + path_len;
static int fs_path_prepare_for_add(struct fs_path *p, int name_len)
	new_len = p->end - p->start + name_len;
	if (p->start != p->end)
	ret = fs_path_ensure_buf(p, new_len);
		if (p->start != p->end)
		p->start -= name_len;
		p->prepared = p->start;
		if (p->start != p->end)
		p->prepared = p->end;
static int fs_path_add(struct fs_path *p, const char *name, int name_len)
	ret = fs_path_prepare_for_add(p, name_len);
	memcpy(p->prepared, name, name_len);
static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
	ret = fs_path_prepare_for_add(p, p2->end - p2->start);
	memcpy(p->prepared, p2->start, p2->end - p2->start);
static int fs_path_add_from_extent_buffer(struct fs_path *p,
					  struct extent_buffer *eb,
					  unsigned long off, int len)
	ret = fs_path_prepare_for_add(p, len);
	read_extent_buffer(eb, p->prepared, off, len);
static void fs_path_remove(struct fs_path *p)
	while (p->start != p->end && *p->end != '/')
static int fs_path_copy(struct fs_path *p, struct fs_path *from)
	p->reversed = from->reversed;
	ret = fs_path_add_path(p, from);
static void fs_path_unreverse(struct fs_path *p)
	len = p->end - p->start;
	p->end = p->start + len;
	memmove(p->start, tmp, len + 1);
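/*
 * Illustrative usage sketch, not part of the original file: building a
 * path from the leaf up to the root with a reversed fs_path. Names are
 * prepended on the left; fs_path_unreverse() then relocates the string
 * to the front of the buffer so that further adds append on the right.
 */
static inline int fs_path_example_usage(void)
{
	struct fs_path *p;
	int ret;

	p = fs_path_alloc_reversed();
	if (!p)
		return -ENOMEM;
	ret = fs_path_add(p, "c", 1);		/* p->start reads "c" */
	if (!ret)
		ret = fs_path_add(p, "b", 1);	/* p->start reads "b/c" */
	if (!ret)
		ret = fs_path_add(p, "a", 1);	/* p->start reads "a/b/c" */
	if (!ret)
		fs_path_unreverse(p);
	fs_path_free(p);
	return ret;
}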
static struct btrfs_path *alloc_path_for_send(void)
	struct btrfs_path *path;
	path = btrfs_alloc_path();
	path->search_commit_root = 1;
	path->skip_locking = 1;
static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
		ret = vfs_write(filp, (char *)buf + pos, len - pos, off);
		/* TODO handle that correctly */
		/*if (ret == -ERESTARTSYS) {
static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
	struct btrfs_tlv_header *hdr;
	int total_len = sizeof(*hdr) + len;
	int left = sctx->send_max_size - sctx->send_size;
	if (unlikely(left < total_len))
	hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
	hdr->tlv_type = cpu_to_le16(attr);
	hdr->tlv_len = cpu_to_le16(len);
	memcpy(hdr + 1, data, len);
	sctx->send_size += total_len;
static int tlv_put_u8(struct send_ctx *sctx, u16 attr, u8 value)
	return tlv_put(sctx, attr, &value, sizeof(value));
static int tlv_put_u16(struct send_ctx *sctx, u16 attr, u16 value)
	__le16 tmp = cpu_to_le16(value);
	return tlv_put(sctx, attr, &tmp, sizeof(tmp));
static int tlv_put_u32(struct send_ctx *sctx, u16 attr, u32 value)
	__le32 tmp = cpu_to_le32(value);
	return tlv_put(sctx, attr, &tmp, sizeof(tmp));
static int tlv_put_u64(struct send_ctx *sctx, u16 attr, u64 value)
	__le64 tmp = cpu_to_le64(value);
	return tlv_put(sctx, attr, &tmp, sizeof(tmp));
static int tlv_put_string(struct send_ctx *sctx, u16 attr,
			  const char *str, int len)
	return tlv_put(sctx, attr, str, len);
static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
	return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
static int tlv_put_timespec(struct send_ctx *sctx, u16 attr,
	struct btrfs_timespec bts;
	bts.sec = cpu_to_le64(ts->tv_sec);
	bts.nsec = cpu_to_le32(ts->tv_nsec);
	return tlv_put(sctx, attr, &bts, sizeof(bts));
static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
				  struct extent_buffer *eb,
				  struct btrfs_timespec *ts)
	struct btrfs_timespec bts;
	read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
	return tlv_put(sctx, attr, &bts, sizeof(bts));
#define TLV_PUT(sctx, attrtype, attrlen, data) \
		ret = tlv_put(sctx, attrtype, attrlen, data); \
			goto tlv_put_failure; \
#define TLV_PUT_INT(sctx, attrtype, bits, value) \
		ret = tlv_put_u##bits(sctx, attrtype, value); \
			goto tlv_put_failure; \
#define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
#define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
#define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
#define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
#define TLV_PUT_STRING(sctx, attrtype, str, len) \
		ret = tlv_put_string(sctx, attrtype, str, len); \
			goto tlv_put_failure; \
#define TLV_PUT_PATH(sctx, attrtype, p) \
		ret = tlv_put_string(sctx, attrtype, p->start, \
				     p->end - p->start); \
			goto tlv_put_failure; \
#define TLV_PUT_UUID(sctx, attrtype, uuid) \
		ret = tlv_put_uuid(sctx, attrtype, uuid); \
			goto tlv_put_failure; \
#define TLV_PUT_TIMESPEC(sctx, attrtype, ts) \
		ret = tlv_put_timespec(sctx, attrtype, ts); \
			goto tlv_put_failure; \
#define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
		ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
			goto tlv_put_failure; \
static int send_header(struct send_ctx *sctx)
	struct btrfs_stream_header hdr;
	strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
	hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);
	return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
 * For each command/item we want to send to userspace, we call this function.
static int begin_cmd(struct send_ctx *sctx, int cmd)
	struct btrfs_cmd_header *hdr;
	if (WARN_ON(!sctx->send_buf))
	BUG_ON(sctx->send_size);
	sctx->send_size += sizeof(*hdr);
	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->cmd = cpu_to_le16(cmd);
static int send_cmd(struct send_ctx *sctx)
	struct btrfs_cmd_header *hdr;
	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
	crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
	hdr->crc = cpu_to_le32(crc);
	ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
	sctx->total_send_size += sctx->send_size;
	sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size;
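/*
 * Sketch of the resulting stream layout (editorial illustration; see the
 * struct definitions in send.h for the authoritative format):
 *
 *	btrfs_stream_header		magic + version, written once
 *	btrfs_cmd_header		len / cmd / crc for command #1
 *	  tlv_header + payload		one per tlv_put*() call
 *	  tlv_header + payload		...
 *	btrfs_cmd_header		command #2
 *	  ...
 *
 * begin_cmd() only reserves room for the command header; send_cmd() fills
 * in the final length and crc32c right before the buffer is written out.
 */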
 * Sends a move instruction to user space
static int send_rename(struct send_ctx *sctx,
		       struct fs_path *from, struct fs_path *to)
verbose_printk("btrfs: send_rename %s -> %s\n", from->start, to->start);
	ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);
	ret = send_cmd(sctx);
 * Sends a link instruction to user space
static int send_link(struct send_ctx *sctx,
		     struct fs_path *path, struct fs_path *lnk)
verbose_printk("btrfs: send_link %s -> %s\n", path->start, lnk->start);
	ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);
	ret = send_cmd(sctx);
 * Sends an unlink instruction to user space
static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
verbose_printk("btrfs: send_unlink %s\n", path->start);
	ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	ret = send_cmd(sctx);
 * Sends an rmdir instruction to user space
static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
verbose_printk("btrfs: send_rmdir %s\n", path->start);
	ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	ret = send_cmd(sctx);
 * Helper function to retrieve some fields from an inode item.
static int get_inode_info(struct btrfs_root *root,
			  u64 ino, u64 *size, u64 *gen,
			  u64 *mode, u64 *uid, u64 *gid,
	struct btrfs_inode_item *ii;
	struct btrfs_key key;
	struct btrfs_path *path;
	path = alloc_path_for_send();
	key.type = BTRFS_INODE_ITEM_KEY;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_inode_item);
		*size = btrfs_inode_size(path->nodes[0], ii);
		*gen = btrfs_inode_generation(path->nodes[0], ii);
		*mode = btrfs_inode_mode(path->nodes[0], ii);
		*uid = btrfs_inode_uid(path->nodes[0], ii);
		*gid = btrfs_inode_gid(path->nodes[0], ii);
		*rdev = btrfs_inode_rdev(path->nodes[0], ii);
	btrfs_free_path(path);
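/*
 * Illustrative call, not from the original file: callers pass NULL for
 * every output they do not need, e.g. to fetch only generation and mode:
 *
 *	ret = get_inode_info(root, ino, NULL, &gen, &mode,
 *			     NULL, NULL, NULL);
 */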
typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
 * Helper function to iterate the entries in ONE btrfs_inode_ref or
 * btrfs_inode_extref.
 * The iterate callback may return a non-zero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 * path must point to the INODE_REF or INODE_EXTREF when called.
static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
			     struct btrfs_key *found_key, int resolve,
			     iterate_inode_ref_t iterate, void *ctx)
	struct extent_buffer *eb = path->nodes[0];
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_inode_extref *extref;
	struct btrfs_path *tmp_path;
	int slot = path->slots[0];
	unsigned long name_off;
	unsigned long elem_size;
	p = fs_path_alloc_reversed();
	tmp_path = alloc_path_for_send();
	if (found_key->type == BTRFS_INODE_REF_KEY) {
		ptr = (unsigned long)btrfs_item_ptr(eb, slot,
						    struct btrfs_inode_ref);
		item = btrfs_item_nr(slot);
		total = btrfs_item_size(eb, item);
		elem_size = sizeof(*iref);
		ptr = btrfs_item_ptr_offset(eb, slot);
		total = btrfs_item_size_nr(eb, slot);
		elem_size = sizeof(*extref);
	while (cur < total) {
		if (found_key->type == BTRFS_INODE_REF_KEY) {
			iref = (struct btrfs_inode_ref *)(ptr + cur);
			name_len = btrfs_inode_ref_name_len(eb, iref);
			name_off = (unsigned long)(iref + 1);
			index = btrfs_inode_ref_index(eb, iref);
			dir = found_key->offset;
			extref = (struct btrfs_inode_extref *)(ptr + cur);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			name_off = (unsigned long)&extref->name;
			index = btrfs_inode_extref_index(eb, extref);
			dir = btrfs_inode_extref_parent(eb, extref);
			start = btrfs_ref_to_path(root, tmp_path, name_len,
				ret = PTR_ERR(start);
			if (start < p->buf) {
				/* overflow, try again with a larger buffer */
				ret = fs_path_ensure_buf(p,
						p->buf_len + p->buf - start);
				start = btrfs_ref_to_path(root, tmp_path,
					ret = PTR_ERR(start);
				BUG_ON(start < p->buf);
			ret = fs_path_add_from_extent_buffer(p, eb, name_off,
		cur += elem_size + name_len;
		ret = iterate(num, dir, index, p, ctx);
	btrfs_free_path(tmp_path);
typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
				  const char *name, int name_len,
				  const char *data, int data_len,
 * Helper function to iterate the entries in ONE btrfs_dir_item.
 * The iterate callback may return a non-zero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 * path must point to the dir item when called.
static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
			    struct btrfs_key *found_key,
			    iterate_dir_item_t iterate, void *ctx)
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_dir_item *di;
	struct btrfs_key di_key;
	buf = kmalloc(buf_len, GFP_NOFS);
	slot = path->slots[0];
	item = btrfs_item_nr(slot);
	di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
	total = btrfs_item_size(eb, item);
	while (cur < total) {
		name_len = btrfs_dir_name_len(eb, di);
		data_len = btrfs_dir_data_len(eb, di);
		type = btrfs_dir_type(eb, di);
		btrfs_dir_item_key_to_cpu(eb, di, &di_key);
		if (name_len + data_len > buf_len) {
			buf_len = PAGE_ALIGN(name_len + data_len);
				buf2 = vmalloc(buf_len);
				buf2 = krealloc(buf, buf_len, GFP_NOFS);
					buf2 = vmalloc(buf_len);
		read_extent_buffer(eb, buf, (unsigned long)(di + 1),
				name_len + data_len);
		len = sizeof(*di) + name_len + data_len;
		di = (struct btrfs_dir_item *)((char *)di + len);
		ret = iterate(num, &di_key, buf, name_len, buf + name_len,
				data_len, type, ctx);
static int __copy_first_ref(int num, u64 dir, int index,
			    struct fs_path *p, void *ctx)
	struct fs_path *pt = ctx;
	ret = fs_path_copy(pt, p);
	/* we want the first only */
 * Retrieve the first path of an inode. If an inode has more than one
 * ref/hardlink, this is ignored.
static int get_inode_path(struct btrfs_root *root,
			  u64 ino, struct fs_path *path)
	struct btrfs_key key, found_key;
	struct btrfs_path *p;
	p = alloc_path_for_send();
	fs_path_reset(path);
	key.type = BTRFS_INODE_REF_KEY;
	ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
	btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
	if (found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
	ret = iterate_inode_ref(root, p, &found_key, 1,
				__copy_first_ref, path);
struct backref_ctx {
	struct send_ctx *sctx;
	/* number of total found references */
	 * used for clones found in send_root. clones found behind cur_objectid
	 * and cur_offset are not considered as allowed clones.
	/* may be truncated in case it's the last extent in a file */
	/* Just to check for bugs in backref resolving */
static int __clone_root_cmp_bsearch(const void *key, const void *elt)
	u64 root = (u64)(uintptr_t)key;
	struct clone_root *cr = (struct clone_root *)elt;
	if (root < cr->root->objectid)
	if (root > cr->root->objectid)
static int __clone_root_cmp_sort(const void *e1, const void *e2)
	struct clone_root *cr1 = (struct clone_root *)e1;
	struct clone_root *cr2 = (struct clone_root *)e2;
	if (cr1->root->objectid < cr2->root->objectid)
	if (cr1->root->objectid > cr2->root->objectid)
 * Called for every backref that is found for the current extent.
 * Results are collected in sctx->clone_roots->ino/offset/found_refs
static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
	struct backref_ctx *bctx = ctx_;
	struct clone_root *found;
	/* First check if the root is in the list of accepted clone sources */
	found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
			bctx->sctx->clone_roots_cnt,
			sizeof(struct clone_root),
			__clone_root_cmp_bsearch);
	if (found->root == bctx->sctx->send_root &&
	    ino == bctx->cur_objectid &&
	    offset == bctx->cur_offset) {
		bctx->found_itself = 1;
	 * There are inodes that have extents lying beyond their i_size. Don't
	 * accept clones from these extents.
	ret = get_inode_info(found->root, ino, &i_size, NULL, NULL, NULL, NULL,
	if (offset + bctx->extent_len > i_size)
	 * Make sure we don't consider clones from send_root that are
	 * behind the current inode/offset.
	if (found->root == bctx->sctx->send_root) {
		 * TODO for the moment we don't accept clones from the inode
		 * that is currently sent. We may change this when
		 * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same
		if (ino >= bctx->cur_objectid)
		if (ino > bctx->cur_objectid)
		if (offset + bctx->extent_len > bctx->cur_offset)
	found->found_refs++;
	if (ino < found->ino) {
		found->offset = offset;
	} else if (found->ino == ino) {
		 * same extent found more than once in the same file.
		if (found->offset > offset + bctx->extent_len)
			found->offset = offset;
 * Given an inode, offset and extent item, it finds a good clone for a clone
 * instruction. Returns -ENOENT when none could be found. The function makes
 * sure that the returned clone is usable at the point where sending is at the
 * moment. This means that no clones are accepted which lie behind the current
 * path must point to the extent item when called.
static int find_extent_clone(struct send_ctx *sctx,
			     struct btrfs_path *path,
			     u64 ino, u64 data_offset,
			     struct clone_root **found)
	u64 extent_item_pos;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *eb = path->nodes[0];
	struct backref_ctx *backref_ctx = NULL;
	struct clone_root *cur_clone_root;
	struct btrfs_key found_key;
	struct btrfs_path *tmp_path;
	tmp_path = alloc_path_for_send();
	backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_NOFS);
	if (data_offset >= ino_size) {
		 * There may be extents that lie behind the file's size.
		 * I at least had this in combination with snapshotting while
		 * writing large files.
	fi = btrfs_item_ptr(eb, path->slots[0],
			struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(eb, fi);
	if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
	compressed = btrfs_file_extent_compression(eb, fi);
	num_bytes = btrfs_file_extent_num_bytes(eb, fi);
	disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
	if (disk_byte == 0) {
	logical = disk_byte + btrfs_file_extent_offset(eb, fi);
	ret = extent_from_logical(sctx->send_root->fs_info, disk_byte, tmp_path,
				  &found_key, &flags);
	btrfs_release_path(tmp_path);
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
	 * Setup the clone roots.
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		cur_clone_root = sctx->clone_roots + i;
		cur_clone_root->ino = (u64)-1;
		cur_clone_root->offset = 0;
		cur_clone_root->found_refs = 0;
	backref_ctx->sctx = sctx;
	backref_ctx->found = 0;
	backref_ctx->cur_objectid = ino;
	backref_ctx->cur_offset = data_offset;
	backref_ctx->found_itself = 0;
	backref_ctx->extent_len = num_bytes;
	 * The last extent of a file may be too large due to page alignment.
	 * We need to adjust extent_len in this case so that the checks in
	 * __iterate_backrefs work.
	if (data_offset + num_bytes >= ino_size)
		backref_ctx->extent_len = ino_size - data_offset;
	 * Now collect all backrefs.
	if (compressed == BTRFS_COMPRESS_NONE)
		extent_item_pos = logical - found_key.objectid;
	else
		extent_item_pos = 0;
	ret = iterate_extent_inodes(sctx->send_root->fs_info,
					found_key.objectid, extent_item_pos, 1,
					__iterate_backrefs, backref_ctx);
	if (!backref_ctx->found_itself) {
		/* found a bug in backref code? */
		printk(KERN_ERR "btrfs: ERROR did not find backref in "
				"send_root. inode=%llu, offset=%llu, "
				"disk_byte=%llu found extent=%llu\n",
				ino, data_offset, disk_byte, found_key.objectid);
verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, "
		"num_bytes=%llu, logical=%llu\n",
		data_offset, ino, num_bytes, logical);
	if (!backref_ctx->found)
		verbose_printk("btrfs: no clones found\n");
	cur_clone_root = NULL;
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		if (sctx->clone_roots[i].found_refs) {
			if (!cur_clone_root)
				cur_clone_root = sctx->clone_roots + i;
			else if (sctx->clone_roots[i].root == sctx->send_root)
				/* prefer clones from send_root over others */
				cur_clone_root = sctx->clone_roots + i;
	if (cur_clone_root) {
		*found = cur_clone_root;
	btrfs_free_path(tmp_path);
static int read_symlink(struct btrfs_root *root,
			struct fs_path *dest)
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item *ei;
	path = alloc_path_for_send();
	key.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], ei);
	compression = btrfs_file_extent_compression(path->nodes[0], ei);
	BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
	BUG_ON(compression);
	off = btrfs_file_extent_inline_start(ei);
	len = btrfs_file_extent_inline_len(path->nodes[0], ei);
	ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);
	btrfs_free_path(path);
 * Helper function to generate a file name that is unique in the root of
 * send_root and parent_root. This is used to generate names for orphan inodes.
static int gen_unique_name(struct send_ctx *sctx,
			   struct fs_path *dest)
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	path = alloc_path_for_send();
		len = snprintf(tmp, sizeof(tmp) - 1, "o%llu-%llu-%llu",
		if (len >= sizeof(tmp)) {
			/* should really not happen */
		di = btrfs_lookup_dir_item(NULL, sctx->send_root,
				path, BTRFS_FIRST_FREE_OBJECTID,
				tmp, strlen(tmp), 0);
		btrfs_release_path(path);
			/* not unique, try again */
		if (!sctx->parent_root) {
		di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
				path, BTRFS_FIRST_FREE_OBJECTID,
				tmp, strlen(tmp), 0);
		btrfs_release_path(path);
			/* not unique, try again */
	ret = fs_path_add(dest, tmp, strlen(tmp));
	btrfs_free_path(path);
	inode_state_no_change,
	inode_state_will_create,
	inode_state_did_create,
	inode_state_will_delete,
	inode_state_did_delete,
static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
	ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
	if (ret < 0 && ret != -ENOENT)
	if (!sctx->parent_root) {
		right_ret = -ENOENT;
		ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
				NULL, NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
	if (!left_ret && !right_ret) {
		if (left_gen == gen && right_gen == gen) {
			ret = inode_state_no_change;
		} else if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
				ret = inode_state_will_create;
		} else if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
				ret = inode_state_will_delete;
	} else if (!left_ret) {
		if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
				ret = inode_state_will_create;
	} else if (!right_ret) {
		if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
				ret = inode_state_will_delete;
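/*
 * Summary of the decision tree above (editorial note): an inode whose
 * generation matches only in send_root is created by this send stream
 * (did_create/will_create depending on send_progress), one that matches
 * only in parent_root is deleted by it (did_delete/will_delete), and one
 * that matches in both roots is unchanged.
 */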
static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
	ret = get_cur_inode_state(sctx, ino, gen);
	if (ret == inode_state_no_change ||
	    ret == inode_state_did_create ||
	    ret == inode_state_will_delete)
 * Helper function to lookup a dir item in a dir.
static int lookup_dir_item_inode(struct btrfs_root *root,
				 u64 dir, const char *name, int name_len,
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_path *path;
	path = alloc_path_for_send();
	di = btrfs_lookup_dir_item(NULL, root, path,
			dir, name, name_len, 0);
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
	*found_inode = key.objectid;
	*found_type = btrfs_dir_type(path->nodes[0], di);
	btrfs_free_path(path);
 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir,
 * generation of the parent dir and the name of the dir entry.
static int get_first_ref(struct btrfs_root *root, u64 ino,
			 u64 *dir, u64 *dir_gen, struct fs_path *name)
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	path = alloc_path_for_send();
	key.type = BTRFS_INODE_REF_KEY;
	ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
	btrfs_item_key_to_cpu(path->nodes[0], &found_key,
	if (ret || found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
	if (key.type == BTRFS_INODE_REF_KEY) {
		struct btrfs_inode_ref *iref;
		iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_inode_ref);
		len = btrfs_inode_ref_name_len(path->nodes[0], iref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
						     (unsigned long)(iref + 1),
		parent_dir = found_key.offset;
		struct btrfs_inode_extref *extref;
		extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
					struct btrfs_inode_extref);
		len = btrfs_inode_extref_name_len(path->nodes[0], extref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
					(unsigned long)&extref->name, len);
		parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
	btrfs_release_path(path);
	ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL, NULL,
	btrfs_free_path(path);
static int is_first_ref(struct btrfs_root *root,
			const char *name, int name_len)
	struct fs_path *tmp_name;
	tmp_name = fs_path_alloc();
	ret = get_first_ref(root, ino, &tmp_dir, &tmp_dir_gen, tmp_name);
	if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {
	ret = !memcmp(tmp_name->start, name, name_len);
	fs_path_free(tmp_name);
 * Used by process_recorded_refs to determine if a new ref would overwrite an
 * already existing ref. In case it detects an overwrite, it returns the
 * inode/gen in who_ino/who_gen.
 * When an overwrite is detected, process_recorded_refs does proper orphanizing
 * to make sure later references to the overwritten inode are possible.
 * Orphanizing is however only required for the first ref of an inode.
 * process_recorded_refs does an additional is_first_ref check to see if
 * orphanizing is really required.
static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
			      const char *name, int name_len,
			      u64 *who_ino, u64 *who_gen)
	u64 other_inode = 0;
	if (!sctx->parent_root)
	ret = is_inode_existent(sctx, dir, dir_gen);
	 * If we have a parent root we need to verify that the parent dir was
	 * not deleted and then re-created, if it was then we have no overwrite
	 * and we can just unlink this entry.
	if (sctx->parent_root) {
		ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
		if (ret < 0 && ret != -ENOENT)
	ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
			&other_inode, &other_type);
	if (ret < 0 && ret != -ENOENT)
	 * Check if the overwritten ref was already processed. If yes, the ref
	 * was already unlinked/moved, so we can safely assume that we will not
	 * overwrite anything at this point in time.
	if (other_inode > sctx->send_progress) {
		ret = get_inode_info(sctx->parent_root, other_inode, NULL,
				who_gen, NULL, NULL, NULL, NULL);
		*who_ino = other_inode;
 * Checks if the ref was overwritten by an already processed inode. This is
 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
 * thus the orphan name needs to be used.
 * process_recorded_refs also uses it to avoid unlinking of refs that were
static int did_overwrite_ref(struct send_ctx *sctx,
			     u64 dir, u64 dir_gen,
			     u64 ino, u64 ino_gen,
			     const char *name, int name_len)
	if (!sctx->parent_root)
	ret = is_inode_existent(sctx, dir, dir_gen);
	/* check if the ref was overwritten by another ref */
	ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
			&ow_inode, &other_type);
	if (ret < 0 && ret != -ENOENT)
	/* was never and will never be overwritten */
	ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
	if (ow_inode == ino && gen == ino_gen) {
	/* we know that it is or will be overwritten. check this now */
	if (ow_inode < sctx->send_progress)
 * Same as did_overwrite_ref, but also checks if it is the first ref of an inode
 * that got overwritten. This is used by process_recorded_refs to determine
 * if it has to use the path as returned by get_cur_path or the orphan name.
static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
	struct fs_path *name = NULL;
	if (!sctx->parent_root)
	name = fs_path_alloc();
	ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
	ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
			name->start, fs_path_len(name));
 * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
 * so we need to do some special handling in case we have clashes. This function
 * takes care of this with the help of name_cache_entry::radix_list.
 * In case of error, nce is kfreed.
static int name_cache_insert(struct send_ctx *sctx,
			     struct name_cache_entry *nce)
	struct list_head *nce_head;
	nce_head = radix_tree_lookup(&sctx->name_cache,
			(unsigned long)nce->ino);
		nce_head = kmalloc(sizeof(*nce_head), GFP_NOFS);
		INIT_LIST_HEAD(nce_head);
		ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
	list_add_tail(&nce->radix_list, nce_head);
	list_add_tail(&nce->list, &sctx->name_cache_list);
	sctx->name_cache_size++;
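/*
 * Example of the clash handling (editorial illustration): on a 32bit
 * kernel the inums 0x1 and 0x100000001 truncate to the same radix tree
 * index, so both entries end up on the same nce_head list and
 * name_cache_search() below tells them apart by comparing the full
 * 64bit ino and gen.
 */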
static void name_cache_delete(struct send_ctx *sctx,
			      struct name_cache_entry *nce)
	struct list_head *nce_head;
	nce_head = radix_tree_lookup(&sctx->name_cache,
			(unsigned long)nce->ino);
	list_del(&nce->radix_list);
	list_del(&nce->list);
	sctx->name_cache_size--;
	if (list_empty(nce_head)) {
		radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
	struct list_head *nce_head;
	struct name_cache_entry *cur;
	nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
	list_for_each_entry(cur, nce_head, radix_list) {
		if (cur->ino == ino && cur->gen == gen)
 * Removes the entry from the list and adds it back to the end. This marks the
 * entry as recently used so that name_cache_clean_unused does not remove it.
static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce)
	list_del(&nce->list);
	list_add_tail(&nce->list, &sctx->name_cache_list);
 * Remove some entries from the beginning of name_cache_list.
static void name_cache_clean_unused(struct send_ctx *sctx)
	struct name_cache_entry *nce;
	if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
	while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
		nce = list_entry(sctx->name_cache_list.next,
				struct name_cache_entry, list);
		name_cache_delete(sctx, nce);
static void name_cache_free(struct send_ctx *sctx)
	struct name_cache_entry *nce;
	while (!list_empty(&sctx->name_cache_list)) {
		nce = list_entry(sctx->name_cache_list.next,
				struct name_cache_entry, list);
		name_cache_delete(sctx, nce);
 * Used by get_cur_path for each ref up to the root.
 * Returns 0 if it succeeded.
 * Returns 1 if the inode does not exist or got overwritten. In that case, the
 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
 * is returned, parent_ino/parent_gen are not guaranteed to be valid.
 * Returns <0 in case of error.
static int __get_cur_name_and_parent(struct send_ctx *sctx,
				     struct fs_path *dest)
	struct btrfs_path *path = NULL;
	struct name_cache_entry *nce = NULL;
	 * First check if we already did a call to this function with the same
	 * ino/gen. If yes, check if the cache entry is still up-to-date. If yes
	 * return the cached result.
	nce = name_cache_search(sctx, ino, gen);
		if (ino < sctx->send_progress && nce->need_later_update) {
			name_cache_delete(sctx, nce);
			name_cache_used(sctx, nce);
			*parent_ino = nce->parent_ino;
			*parent_gen = nce->parent_gen;
			ret = fs_path_add(dest, nce->name, nce->name_len);
	path = alloc_path_for_send();
	 * If the inode does not exist yet, add the orphan name and return 1.
	 * This should only happen for the parent dir that we determine in
	ret = is_inode_existent(sctx, ino, gen);
		ret = gen_unique_name(sctx, ino, gen, dest);
	 * Depending on whether the inode was already processed or not, use
	 * send_root or parent_root for ref lookup.
	if (ino < sctx->send_progress)
		ret = get_first_ref(sctx->send_root, ino,
				parent_ino, parent_gen, dest);
		ret = get_first_ref(sctx->parent_root, ino,
				parent_ino, parent_gen, dest);
	 * Check if the ref was overwritten by an inode's ref that was processed
	 * earlier. If yes, treat as orphan and return 1.
	ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
			dest->start, dest->end - dest->start);
		fs_path_reset(dest);
		ret = gen_unique_name(sctx, ino, gen, dest);
	 * Store the result of the lookup in the name cache.
	nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_NOFS);
	nce->parent_ino = *parent_ino;
	nce->parent_gen = *parent_gen;
	nce->name_len = fs_path_len(dest);
	strcpy(nce->name, dest->start);
	if (ino < sctx->send_progress)
		nce->need_later_update = 0;
		nce->need_later_update = 1;
	nce_ret = name_cache_insert(sctx, nce);
	name_cache_clean_unused(sctx);
	btrfs_free_path(path);
 * Magic happens here. This function returns the first ref to an inode as it
 * would look like while receiving the stream at this point in time.
 * We walk the path up to the root. For every inode in between, we check if it
 * was already processed/sent. If yes, we continue with the parent as found
 * in send_root. If not, we continue with the parent as found in parent_root.
 * If we encounter an inode that was deleted at this point in time, we use the
 * inodes "orphan" name instead of the real name and stop. Same with new inodes
 * that were not created yet and overwritten inodes/refs.
 * When do we have orphan inodes:
 * 1. When an inode is freshly created and thus no valid refs are available yet
 * 2. When a directory lost all its refs (deleted) but still has dir items
 *    inside which were not processed yet (pending for move/delete). If anyone
 *    tried to get the path to the dir items, it would get a path inside that
 * 3. When an inode is moved around or gets new links, it may overwrite the ref
 *    of an unprocessed inode. If in that case the first ref would be
 *    overwritten, the overwritten inode gets "orphanized". Later when we
 *    process this overwritten inode, it is restored at a new place by moving
 * sctx->send_progress tells this function at which point in time receiving
static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
			struct fs_path *dest)
	struct fs_path *name = NULL;
	u64 parent_inode = 0;
	name = fs_path_alloc();
	fs_path_reset(dest);
	while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
		fs_path_reset(name);
		ret = __get_cur_name_and_parent(sctx, ino, gen,
				&parent_inode, &parent_gen, name);
		ret = fs_path_add_path(dest, name);
		fs_path_unreverse(dest);
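/*
 * Worked example (editorial illustration, inode numbers made up): with
 * send_progress == 260 and the tree a(258)/b(259)/c(261), resolving the
 * path of c walks c -> b -> a; b and a were already processed (ino <
 * send_progress), so their names are looked up in send_root, while c's
 * name still comes from parent_root.
 */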
 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
static int send_subvol_begin(struct send_ctx *sctx)
	struct btrfs_root *send_root = sctx->send_root;
	struct btrfs_root *parent_root = sctx->parent_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root_ref *ref;
	struct extent_buffer *leaf;
	path = alloc_path_for_send();
	name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_NOFS);
		btrfs_free_path(path);
	key.objectid = send_root->objectid;
	key.type = BTRFS_ROOT_BACKREF_KEY;
	ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.type != BTRFS_ROOT_BACKREF_KEY ||
	    key.objectid != send_root->objectid) {
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
	namelen = btrfs_root_ref_name_len(leaf, ref);
	read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
	btrfs_release_path(path);
		ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
		ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
	TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);
	TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
			sctx->send_root->root_item.uuid);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
		    le64_to_cpu(sctx->send_root->root_item.ctransid));
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
				sctx->parent_root->root_item.uuid);
		TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
			    le64_to_cpu(sctx->parent_root->root_item.ctransid));
	ret = send_cmd(sctx);
	btrfs_free_path(path);
static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
verbose_printk("btrfs: send_truncate %llu size=%llu\n", ino, size);
	p = fs_path_alloc();
	ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
	ret = get_cur_path(sctx, ino, gen, p);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);
	ret = send_cmd(sctx);
static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
verbose_printk("btrfs: send_chmod %llu mode=%llu\n", ino, mode);
	p = fs_path_alloc();
	ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
	ret = get_cur_path(sctx, ino, gen, p);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);
	ret = send_cmd(sctx);
static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
verbose_printk("btrfs: send_chown %llu uid=%llu, gid=%llu\n", ino, uid, gid);
	p = fs_path_alloc();
	ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
	ret = get_cur_path(sctx, ino, gen, p);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);
	ret = send_cmd(sctx);
static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
	struct fs_path *p = NULL;
	struct btrfs_inode_item *ii;
	struct btrfs_path *path = NULL;
	struct extent_buffer *eb;
	struct btrfs_key key;
verbose_printk("btrfs: send_utimes %llu\n", ino);
	p = fs_path_alloc();
	path = alloc_path_for_send();
	key.type = BTRFS_INODE_ITEM_KEY;
	ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
	eb = path->nodes[0];
	slot = path->slots[0];
	ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
	ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
	ret = get_cur_path(sctx, ino, gen, p);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb,
			btrfs_inode_atime(ii));
	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb,
			btrfs_inode_mtime(ii));
	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb,
			btrfs_inode_ctime(ii));
	/* TODO Add otime support when the otime patches get into upstream */
	ret = send_cmd(sctx);
	btrfs_free_path(path);
 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
 * a valid path yet because we did not process the refs yet. So, the inode
 * is created as orphan.
static int send_create_inode(struct send_ctx *sctx, u64 ino)
verbose_printk("btrfs: send_create_inode %llu\n", ino);
	p = fs_path_alloc();
	ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode, NULL,
	if (S_ISREG(mode)) {
		cmd = BTRFS_SEND_C_MKFILE;
	} else if (S_ISDIR(mode)) {
		cmd = BTRFS_SEND_C_MKDIR;
	} else if (S_ISLNK(mode)) {
		cmd = BTRFS_SEND_C_SYMLINK;
	} else if (S_ISCHR(mode) || S_ISBLK(mode)) {
		cmd = BTRFS_SEND_C_MKNOD;
	} else if (S_ISFIFO(mode)) {
		cmd = BTRFS_SEND_C_MKFIFO;
	} else if (S_ISSOCK(mode)) {
		cmd = BTRFS_SEND_C_MKSOCK;
		printk(KERN_WARNING "btrfs: unexpected inode type %o",
		       (int)(mode & S_IFMT));
	ret = begin_cmd(sctx, cmd);
	ret = gen_unique_name(sctx, ino, gen, p);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino);
	if (S_ISLNK(mode)) {
		ret = read_symlink(sctx->send_root, ino, p);
		TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
	} else if (S_ISCHR(mode) || S_ISBLK(mode) ||
		   S_ISFIFO(mode) || S_ISSOCK(mode)) {
		TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
		TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
	ret = send_cmd(sctx);
 * We need some special handling for inodes that get processed before the parent
 * directory got created. See process_recorded_refs for details.
 * This function checks if we already created the dir out of order.
static int did_create_dir(struct send_ctx *sctx, u64 dir)
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key di_key;
	struct extent_buffer *eb;
	struct btrfs_dir_item *di;
	path = alloc_path_for_send();
	key.type = BTRFS_DIR_INDEX_KEY;
		ret = btrfs_search_slot_for_read(sctx->send_root, &key, path,
		eb = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(eb, &found_key, slot);
		if (ret || found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
		di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
		btrfs_dir_item_key_to_cpu(eb, di, &di_key);
		if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
		    di_key.objectid < sctx->send_progress) {
		key.offset = found_key.offset + 1;
		btrfs_release_path(path);
	btrfs_free_path(path);
 * Only creates the inode if it is:
 * 1. Not a directory
 * 2. Or a directory which was not created already due to out of order
 *    directories. See did_create_dir and process_recorded_refs for details.
static int send_create_inode_if_needed(struct send_ctx *sctx)
	if (S_ISDIR(sctx->cur_inode_mode)) {
		ret = did_create_dir(sctx, sctx->cur_ino);
	ret = send_create_inode(sctx, sctx->cur_ino);
struct recorded_ref {
	struct list_head list;
	struct fs_path *full_path;
 * We need to process new refs before deleted refs, but compare_tree gives us
 * everything mixed. So we first record all refs and later process them.
 * This function is a helper to record one ref.
static int record_ref(struct list_head *head, u64 dir,
		      u64 dir_gen, struct fs_path *path)
	struct recorded_ref *ref;
	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	ref->dir_gen = dir_gen;
	ref->full_path = path;
	ref->name = (char *)kbasename(ref->full_path->start);
	ref->name_len = ref->full_path->end - ref->name;
	ref->dir_path = ref->full_path->start;
	if (ref->name == ref->full_path->start)
		ref->dir_path_len = 0;
		ref->dir_path_len = ref->full_path->end -
				ref->full_path->start - 1 - ref->name_len;
	list_add_tail(&ref->list, head);
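/*
 * Example of the split above (editorial illustration): for the
 * full_path "a/b/c", name points at "c" (name_len == 1) and dir_path
 * covers "a/b" (dir_path_len == 3); for a top-level ref "c",
 * name == full_path->start and dir_path_len stays 0.
 */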
static int dup_ref(struct recorded_ref *ref, struct list_head *list)
	struct recorded_ref *new;
	new = kmalloc(sizeof(*ref), GFP_NOFS);
	new->dir = ref->dir;
	new->dir_gen = ref->dir_gen;
	new->full_path = NULL;
	INIT_LIST_HEAD(&new->list);
	list_add_tail(&new->list, list);
static void __free_recorded_refs(struct list_head *head)
	struct recorded_ref *cur;
	while (!list_empty(head)) {
		cur = list_entry(head->next, struct recorded_ref, list);
		fs_path_free(cur->full_path);
		list_del(&cur->list);
static void free_recorded_refs(struct send_ctx *sctx)
	__free_recorded_refs(&sctx->new_refs);
	__free_recorded_refs(&sctx->deleted_refs);
 * Renames/moves a file/dir to its orphan name. Used when the first
 * ref of an unprocessed inode gets overwritten and for all non-empty
static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
			   struct fs_path *path)
	struct fs_path *orphan;
	orphan = fs_path_alloc();
	ret = gen_unique_name(sctx, ino, gen, orphan);
	ret = send_rename(sctx, path, orphan);
	fs_path_free(orphan);
 * Returns 1 if a directory can be removed at this point in time.
 * We check this by iterating all dir items and checking if the inode behind
 * the dir item was already processed.
static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 send_progress)
	struct btrfs_root *root = sctx->parent_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key loc;
	struct btrfs_dir_item *di;
	 * Don't try to rmdir the top/root subvolume dir.
	if (dir == BTRFS_FIRST_FREE_OBJECTID)
	path = alloc_path_for_send();
	key.type = BTRFS_DIR_INDEX_KEY;
		ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
		if (ret || found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
		di = btrfs_item_ptr(path->nodes[0], path->slots[0],
				struct btrfs_dir_item);
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);
		if (loc.objectid > send_progress) {
		btrfs_release_path(path);
		key.offset = found_key.offset + 1;
	btrfs_free_path(path);
 * This does all the move/link/unlink/rmdir magic.
static int process_recorded_refs(struct send_ctx *sctx)
	struct recorded_ref *cur;
	struct recorded_ref *cur2;
	struct list_head check_dirs;
	struct fs_path *valid_path = NULL;
	int did_overwrite = 0;
verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
	 * This should never happen as the root dir always has the same ref
	 * which is always '..'
	BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
	INIT_LIST_HEAD(&check_dirs);
	valid_path = fs_path_alloc();
	 * First, check if the first ref of the current inode was overwritten
	 * before. If yes, we know that the current inode was already orphanized
	 * and thus use the orphan name. If not, we can use get_cur_path to
	 * get the path of the first ref as it would look like while receiving
	 * at this point in time.
	 * New inodes are always orphan at the beginning, so force to use the
	 * orphan name in this case.
	 * The first ref is stored in valid_path and will be updated if it
	 * gets moved around.
	if (!sctx->cur_inode_new) {
		ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
				sctx->cur_inode_gen);
	if (sctx->cur_inode_new || did_overwrite) {
		ret = gen_unique_name(sctx, sctx->cur_ino,
				sctx->cur_inode_gen, valid_path);
		ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				valid_path);
	list_for_each_entry(cur, &sctx->new_refs, list) {
		 * We may have refs where the parent directory does not exist
		 * yet. This happens if the parent directory's inum is higher
		 * than the current inum. To handle this case, we create the
		 * parent directory out of order. But we need to check if this
		 * did already happen before due to other refs in the same dir.
		ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
		if (ret == inode_state_will_create) {
			 * First check if any of the current inodes refs did
			 * already create the dir.
			list_for_each_entry(cur2, &sctx->new_refs, list) {
				if (cur2->dir == cur->dir) {
			 * If that did not happen, check if a previous inode
			 * did already create the dir.
				ret = did_create_dir(sctx, cur->dir);
				ret = send_create_inode(sctx, cur->dir);
		 * Check if this new ref would overwrite the first ref of
		 * another unprocessed inode. If yes, orphanize the
		 * overwritten inode. If we find an overwritten ref that is
		 * not the first ref, simply unlink it.
		ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
				cur->name, cur->name_len,
				&ow_inode, &ow_gen);
			ret = is_first_ref(sctx->parent_root,
					   ow_inode, cur->dir, cur->name,
				ret = orphanize_inode(sctx, ow_inode, ow_gen,
				ret = send_unlink(sctx, cur->full_path);
		 * link/move the ref to the new place. If we have an orphan
		 * inode, move it and update valid_path. If not, link or move
		 * it depending on the inode mode.
			ret = send_rename(sctx, valid_path, cur->full_path);
			ret = fs_path_copy(valid_path, cur->full_path);
			if (S_ISDIR(sctx->cur_inode_mode)) {
				 * Dirs can't be linked, so move it. For moved
				 * dirs, we always have one new and one deleted
				 * ref. The deleted ref is ignored later.
				ret = send_rename(sctx, valid_path,
				ret = fs_path_copy(valid_path, cur->full_path);
				ret = send_link(sctx, cur->full_path,
		ret = dup_ref(cur, &check_dirs);
	if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
		 * Check if we can already rmdir the directory. If not,
		 * orphanize it. For every dir item inside that gets deleted
		 * later, we do this check again and rmdir it then if possible.
		 * See the use of check_dirs for more details.
		ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_ino);
			ret = send_rmdir(sctx, valid_path);
		} else if (!is_orphan) {
			ret = orphanize_inode(sctx, sctx->cur_ino,
					sctx->cur_inode_gen, valid_path);
		list_for_each_entry(cur, &sctx->deleted_refs, list) {
			ret = dup_ref(cur, &check_dirs);
	} else if (S_ISDIR(sctx->cur_inode_mode) &&
		   !list_empty(&sctx->deleted_refs)) {
		 * We have a moved dir. Add the old parent to check_dirs
		cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
		ret = dup_ref(cur, &check_dirs);
	} else if (!S_ISDIR(sctx->cur_inode_mode)) {
		 * We have a non dir inode. Go through all deleted refs and
		 * unlink them if they were not already overwritten by other
		list_for_each_entry(cur, &sctx->deleted_refs, list) {
			ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
					sctx->cur_ino, sctx->cur_inode_gen,
					cur->name, cur->name_len);
				ret = send_unlink(sctx, cur->full_path);
			ret = dup_ref(cur, &check_dirs);
		 * If the inode is still orphan, unlink the orphan. This may
		 * happen when a previous inode did overwrite the first ref
		 * of this inode and no new refs were added for the current
		 * inode. Unlinking does not mean that the inode is deleted in
		 * all cases. There may still be links to this inode in other
			ret = send_unlink(sctx, valid_path);
	 * We did collect all parent dirs where cur_inode was once located. We
	 * now go through all these dirs and check if they are pending for
	 * deletion and if it's finally possible to perform the rmdir now.
	 * We also update the inode stats of the parent dirs here.
	list_for_each_entry(cur, &check_dirs, list) {
		 * In case we had refs into dirs that were not processed yet,
		 * we don't need to do the utime and rmdir logic for these dirs.
		 * The dir will be processed later.
		if (cur->dir > sctx->cur_ino)
		ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
		if (ret == inode_state_did_create ||
		    ret == inode_state_no_change) {
			/* TODO delayed utimes */
			ret = send_utimes(sctx, cur->dir, cur->dir_gen);
		} else if (ret == inode_state_did_delete) {
			ret = can_rmdir(sctx, cur->dir, sctx->cur_ino);
				ret = get_cur_path(sctx, cur->dir,
						   cur->dir_gen, valid_path);
				ret = send_rmdir(sctx, valid_path);
	__free_recorded_refs(&check_dirs);
	free_recorded_refs(sctx);
	fs_path_free(valid_path);
static int __record_new_ref(int num, u64 dir, int index,
			    struct fs_path *name,
	struct send_ctx *sctx = ctx;
	p = fs_path_alloc();
	ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL, NULL,
	ret = get_cur_path(sctx, dir, gen, p);
	ret = fs_path_add_path(p, name);
	ret = record_ref(&sctx->new_refs, dir, gen, p);
static int __record_deleted_ref(int num, u64 dir, int index,
				struct fs_path *name,
	struct send_ctx *sctx = ctx;
	p = fs_path_alloc();
	ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL, NULL,
	ret = get_cur_path(sctx, dir, gen, p);
	ret = fs_path_add_path(p, name);
	ret = record_ref(&sctx->deleted_refs, dir, gen, p);
static int record_new_ref(struct send_ctx *sctx)
	ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
				sctx->cmp_key, 0, __record_new_ref, sctx);
static int record_deleted_ref(struct send_ctx *sctx)
	ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
				sctx->cmp_key, 0, __record_deleted_ref, sctx);
3071 struct find_ref_ctx {
3074 struct btrfs_root *root;
3075 struct fs_path *name;
3079 static int __find_iref(int num, u64 dir, int index,
3080 struct fs_path *name,
3083 struct find_ref_ctx *ctx = ctx_;
3087 if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) &&
3088 strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) {
3090 * To avoid doing extra lookups we'll only do this if everything else matches.
3093 ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL,
3097 if (dir_gen != ctx->dir_gen)
3099 ctx->found_idx = num;
3105 static int find_iref(struct btrfs_root *root,
3106 struct btrfs_path *path,
3107 struct btrfs_key *key,
3108 u64 dir, u64 dir_gen, struct fs_path *name)
3111 struct find_ref_ctx ctx;
3115 ctx.dir_gen = dir_gen;
3119 ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx);
3123 if (ctx.found_idx == -1)
3126 return ctx.found_idx;
3129 static int __record_changed_new_ref(int num, u64 dir, int index,
3130 struct fs_path *name,
3135 struct send_ctx *sctx = ctx;
3137 ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,
3142 ret = find_iref(sctx->parent_root, sctx->right_path,
3143 sctx->cmp_key, dir, dir_gen, name);
3145 ret = __record_new_ref(num, dir, index, name, sctx);
3152 static int __record_changed_deleted_ref(int num, u64 dir, int index,
3153 struct fs_path *name,
3158 struct send_ctx *sctx = ctx;
3160 ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,
3165 ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
3166 dir, dir_gen, name);
3168 ret = __record_deleted_ref(num, dir, index, name, sctx);
3175 static int record_changed_ref(struct send_ctx *sctx)
3179 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
3180 sctx->cmp_key, 0, __record_changed_new_ref, sctx);
3183 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
3184 sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
3194 * Record and process all refs at once. Needed when an inode changes its
3195 * generation number, which means that it was deleted and recreated.
3197 static int process_all_refs(struct send_ctx *sctx,
3198 enum btrfs_compare_tree_result cmd)
3201 struct btrfs_root *root;
3202 struct btrfs_path *path;
3203 struct btrfs_key key;
3204 struct btrfs_key found_key;
3205 struct extent_buffer *eb;
3207 iterate_inode_ref_t cb;
3209 path = alloc_path_for_send();
3213 if (cmd == BTRFS_COMPARE_TREE_NEW) {
3214 root = sctx->send_root;
3215 cb = __record_new_ref;
3216 } else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
3217 root = sctx->parent_root;
3218 cb = __record_deleted_ref;
3223 key.objectid = sctx->cmp_key->objectid;
3224 key.type = BTRFS_INODE_REF_KEY;
3227 ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
3233 eb = path->nodes[0];
3234 slot = path->slots[0];
3235 btrfs_item_key_to_cpu(eb, &found_key, slot);
3237 if (found_key.objectid != key.objectid ||
3238 (found_key.type != BTRFS_INODE_REF_KEY &&
3239 found_key.type != BTRFS_INODE_EXTREF_KEY))
3242 ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
3243 btrfs_release_path(path);
3247 key.offset = found_key.offset + 1;
3249 btrfs_release_path(path);
3251 ret = process_recorded_refs(sctx);
3254 btrfs_free_path(path);
3258 static int send_set_xattr(struct send_ctx *sctx,
3259 struct fs_path *path,
3260 const char *name, int name_len,
3261 const char *data, int data_len)
3265 ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
3269 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
3270 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
3271 TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);
3273 ret = send_cmd(sctx);
3280 static int send_remove_xattr(struct send_ctx *sctx,
3281 struct fs_path *path,
3282 const char *name, int name_len)
3286 ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
3290 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
3291 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
3293 ret = send_cmd(sctx);
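/*
 * Editor's illustration: the begin_cmd()/TLV_PUT*()/send_cmd() helpers used
 * above serialize each command as a header followed by a list of
 * type-length-value attributes. A minimal sketch of how a consumer could
 * walk such attributes, assuming the 16-bit type and 16-bit length prefix
 * used by this stream format (struct and function names are hypothetical):
 */
struct example_tlv_header {
	__le16 tlv_type;
	__le16 tlv_len;		/* length of the data, excluding this header */
} __attribute__((packed));

static void example_walk_tlvs(const char *data, u32 len)
{
	u32 pos = 0;

	while (pos + sizeof(struct example_tlv_header) <= len) {
		const struct example_tlv_header *hdr =
				(const void *)(data + pos);
		u16 tlv_len = le16_to_cpu(hdr->tlv_len);

		pos += sizeof(*hdr);
		/* data + pos now points at tlv_len bytes of attribute data */
		pos += tlv_len;
	}
}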
3300 static int __process_new_xattr(int num, struct btrfs_key *di_key,
3301 const char *name, int name_len,
3302 const char *data, int data_len,
3306 struct send_ctx *sctx = ctx;
3308 posix_acl_xattr_header dummy_acl;
3310 p = fs_path_alloc();
3315 * This hack is needed because empty ACLs are stored as zero-byte
3316 * data in xattrs. The problem is that receiving these zero-byte
3317 * ACLs will fail later. To work around this, we send a dummy ACL
3318 * list that only contains the version number and no entries.
3320 if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
3321 !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
3322 if (data_len == 0) {
3323 dummy_acl.a_version =
3324 cpu_to_le32(POSIX_ACL_XATTR_VERSION);
3325 data = (char *)&dummy_acl;
3326 data_len = sizeof(dummy_acl);
3330 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
3334 ret = send_set_xattr(sctx, p, name, name_len, data, data_len);
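/*
 * Editor's note (worked example): for an empty ACL, the dummy sent above is
 * just the 4-byte posix_acl_xattr_header, i.e. a_version =
 * POSIX_ACL_XATTR_VERSION (2) in little-endian form: 02 00 00 00. The
 * receiver then sees a valid ACL list with no entries.
 */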
3341 static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
3342 const char *name, int name_len,
3343 const char *data, int data_len,
3347 struct send_ctx *sctx = ctx;
3350 p = fs_path_alloc();
3354 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
3358 ret = send_remove_xattr(sctx, p, name, name_len);
3365 static int process_new_xattr(struct send_ctx *sctx)
3369 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
3370 sctx->cmp_key, __process_new_xattr, sctx);
3375 static int process_deleted_xattr(struct send_ctx *sctx)
3379 ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
3380 sctx->cmp_key, __process_deleted_xattr, sctx);
3385 struct find_xattr_ctx {
3393 static int __find_xattr(int num, struct btrfs_key *di_key,
3394 const char *name, int name_len,
3395 const char *data, int data_len,
3396 u8 type, void *vctx)
3398 struct find_xattr_ctx *ctx = vctx;
3400 if (name_len == ctx->name_len &&
3401 strncmp(name, ctx->name, name_len) == 0) {
3402 ctx->found_idx = num;
3403 ctx->found_data_len = data_len;
3404 ctx->found_data = kmemdup(data, data_len, GFP_NOFS);
3405 if (!ctx->found_data)
3412 static int find_xattr(struct btrfs_root *root,
3413 struct btrfs_path *path,
3414 struct btrfs_key *key,
3415 const char *name, int name_len,
3416 char **data, int *data_len)
3419 struct find_xattr_ctx ctx;
3422 ctx.name_len = name_len;
3424 ctx.found_data = NULL;
3425 ctx.found_data_len = 0;
3427 ret = iterate_dir_item(root, path, key, __find_xattr, &ctx);
3431 if (ctx.found_idx == -1)
3434 *data = ctx.found_data;
3435 *data_len = ctx.found_data_len;
3437 kfree(ctx.found_data);
3439 return ctx.found_idx;
3443 static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
3444 const char *name, int name_len,
3445 const char *data, int data_len,
3449 struct send_ctx *sctx = ctx;
3450 char *found_data = NULL;
3451 int found_data_len = 0;
3453 ret = find_xattr(sctx->parent_root, sctx->right_path,
3454 sctx->cmp_key, name, name_len, &found_data,
3456 if (ret == -ENOENT) {
3457 ret = __process_new_xattr(num, di_key, name, name_len, data,
3458 data_len, type, ctx);
3459 } else if (ret >= 0) {
3460 if (data_len != found_data_len ||
3461 memcmp(data, found_data, data_len)) {
3462 ret = __process_new_xattr(num, di_key, name, name_len,
3463 data, data_len, type, ctx);
3473 static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
3474 const char *name, int name_len,
3475 const char *data, int data_len,
3479 struct send_ctx *sctx = ctx;
3481 ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
3482 name, name_len, NULL, NULL);
3484 ret = __process_deleted_xattr(num, di_key, name, name_len, data,
3485 data_len, type, ctx);
3492 static int process_changed_xattr(struct send_ctx *sctx)
3496 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
3497 sctx->cmp_key, __process_changed_new_xattr, sctx);
3500 ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
3501 sctx->cmp_key, __process_changed_deleted_xattr, sctx);
3507 static int process_all_new_xattrs(struct send_ctx *sctx)
3510 struct btrfs_root *root;
3511 struct btrfs_path *path;
3512 struct btrfs_key key;
3513 struct btrfs_key found_key;
3514 struct extent_buffer *eb;
3517 path = alloc_path_for_send();
3521 root = sctx->send_root;
3523 key.objectid = sctx->cmp_key->objectid;
3524 key.type = BTRFS_XATTR_ITEM_KEY;
3527 ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
3535 eb = path->nodes[0];
3536 slot = path->slots[0];
3537 btrfs_item_key_to_cpu(eb, &found_key, slot);
3539 if (found_key.objectid != key.objectid ||
3540 found_key.type != key.type) {
3545 ret = iterate_dir_item(root, path, &found_key,
3546 __process_new_xattr, sctx);
3550 btrfs_release_path(path);
3551 key.offset = found_key.offset + 1;
3555 btrfs_free_path(path);
3559 static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
3561 struct btrfs_root *root = sctx->send_root;
3562 struct btrfs_fs_info *fs_info = root->fs_info;
3563 struct inode *inode;
3566 struct btrfs_key key;
3567 pgoff_t index = offset >> PAGE_CACHE_SHIFT;
3569 unsigned pg_offset = offset & ~PAGE_CACHE_MASK;
3572 key.objectid = sctx->cur_ino;
3573 key.type = BTRFS_INODE_ITEM_KEY;
3576 inode = btrfs_iget(fs_info->sb, &key, root, NULL);
3578 return PTR_ERR(inode);
3580 if (offset + len > i_size_read(inode)) {
3581 if (offset > i_size_read(inode))
3584 len = i_size_read(inode) - offset;
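/*
 * Editor's illustration: with i_size_read(inode) == 10240, a request for
 * offset 8192, len 4096 is clamped to len 2048; a request starting at
 * offset 12288 yields len 0, so we never read past EOF.
 */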
3589 last_index = (offset + len - 1) >> PAGE_CACHE_SHIFT;
3590 while (index <= last_index) {
3591 unsigned cur_len = min_t(unsigned, len,
3592 PAGE_CACHE_SIZE - pg_offset);
3593 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
3599 if (!PageUptodate(page)) {
3600 btrfs_readpage(NULL, page);
3602 if (!PageUptodate(page)) {
3604 page_cache_release(page);
3611 memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
3614 page_cache_release(page);
3626 * Read some bytes from the current inode/file and send a write command to user space.
3629 static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
3633 ssize_t num_read = 0;
3635 p = fs_path_alloc();
3639 verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len);
3641 num_read = fill_read_buf(sctx, offset, len);
3642 if (num_read <= 0) {
3648 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
3652 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
3656 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
3657 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
3658 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read);
3660 ret = send_cmd(sctx);
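/*
 * Editor's illustration (user-space, not part of this file): a receiver
 * applies the resulting BTRFS_SEND_C_WRITE command by writing the DATA
 * attribute at FILE_OFFSET, e.g. with pwrite(2) from <unistd.h>. The
 * function name is hypothetical:
 */
static int example_apply_write(int fd, const void *buf, size_t len, off_t off)
{
	/* short writes are treated as errors in this sketch */
	return pwrite(fd, buf, len, off) == (ssize_t)len ? 0 : -1;
}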
3671 * Send a clone command to user space.
3673 static int send_clone(struct send_ctx *sctx,
3674 u64 offset, u32 len,
3675 struct clone_root *clone_root)
3681 verbose_printk("btrfs: send_clone offset=%llu, len=%d, clone_root=%llu, "
3682 "clone_inode=%llu, clone_offset=%llu\n", offset, len,
3683 clone_root->root->objectid, clone_root->ino,
3684 clone_root->offset);
3686 p = fs_path_alloc();
3690 ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
3694 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
3698 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
3699 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
3700 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
3702 if (clone_root->root == sctx->send_root) {
3703 ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
3704 &gen, NULL, NULL, NULL, NULL);
3707 ret = get_cur_path(sctx, clone_root->ino, gen, p);
3709 ret = get_inode_path(clone_root->root, clone_root->ino, p);
3714 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
3715 clone_root->root->root_item.uuid);
3716 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
3717 le64_to_cpu(clone_root->root->root_item.ctransid));
3718 TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
3719 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
3720 clone_root->offset);
3722 ret = send_cmd(sctx);
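/*
 * Editor's illustration (user-space, not part of this file): a receiver can
 * apply the clone command with the clone-range ioctl, assuming <sys/ioctl.h>
 * and <linux/btrfs.h> and already-opened source and destination fds. The
 * function name is hypothetical:
 */
static int example_apply_clone(int dst_fd, int src_fd,
			       __u64 src_off, __u64 len, __u64 dst_off)
{
	struct btrfs_ioctl_clone_range_args args = {
		.src_fd = src_fd,
		.src_offset = src_off,
		.src_length = len,
		.dest_offset = dst_off,
	};

	return ioctl(dst_fd, BTRFS_IOC_CLONE_RANGE, &args);
}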
3731 * Send an update extent command to user space.
3733 static int send_update_extent(struct send_ctx *sctx,
3734 u64 offset, u32 len)
3739 p = fs_path_alloc();
3743 ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
3747 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
3751 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
3752 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
3753 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
3755 ret = send_cmd(sctx);
3763 static int send_hole(struct send_ctx *sctx, u64 end)
3765 struct fs_path *p = NULL;
3766 u64 offset = sctx->cur_inode_last_extent;
3770 p = fs_path_alloc();
3773 memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE);
3774 while (offset < end) {
3775 len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE);
3777 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
3780 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
3783 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
3784 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
3785 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len);
3786 ret = send_cmd(sctx);
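/*
 * Editor's illustration: with BTRFS_SEND_READ_SIZE = 48K, a 100K hole
 * (end - offset = 102400) is emitted as three zero-filled write commands
 * of 49152, 49152 and 4096 bytes.
 */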
3796 static int send_write_or_clone(struct send_ctx *sctx,
3797 struct btrfs_path *path,
3798 struct btrfs_key *key,
3799 struct clone_root *clone_root)
3802 struct btrfs_file_extent_item *ei;
3803 u64 offset = key->offset;
3809 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
3810 struct btrfs_file_extent_item);
3811 type = btrfs_file_extent_type(path->nodes[0], ei);
3812 if (type == BTRFS_FILE_EXTENT_INLINE) {
3813 len = btrfs_file_extent_inline_len(path->nodes[0], ei);
3815 * It is possible the inline item won't cover the whole page,
3816 * but there may be items after this page. Make sure to send
3817 * the whole thing.
3819 len = PAGE_CACHE_ALIGN(len);
3821 len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
3824 if (offset + len > sctx->cur_inode_size)
3825 len = sctx->cur_inode_size - offset;
3832 ret = send_clone(sctx, offset, len, clone_root);
3833 } else if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA) {
3834 ret = send_update_extent(sctx, offset, len);
3838 if (l > BTRFS_SEND_READ_SIZE)
3839 l = BTRFS_SEND_READ_SIZE;
3840 ret = send_write(sctx, pos + offset, l);
3853 static int is_extent_unchanged(struct send_ctx *sctx,
3854 struct btrfs_path *left_path,
3855 struct btrfs_key *ekey)
3858 struct btrfs_key key;
3859 struct btrfs_path *path = NULL;
3860 struct extent_buffer *eb;
3862 struct btrfs_key found_key;
3863 struct btrfs_file_extent_item *ei;
3868 u64 left_offset_fixed;
3876 path = alloc_path_for_send();
3880 eb = left_path->nodes[0];
3881 slot = left_path->slots[0];
3882 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
3883 left_type = btrfs_file_extent_type(eb, ei);
3885 if (left_type != BTRFS_FILE_EXTENT_REG) {
3889 left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
3890 left_len = btrfs_file_extent_num_bytes(eb, ei);
3891 left_offset = btrfs_file_extent_offset(eb, ei);
3892 left_gen = btrfs_file_extent_generation(eb, ei);
3895 * The following comments refer to this diagram. L is the left
3896 * extent which we are checking at the moment. 1-8 are the right
3897 * extents that we iterate over.
3900 * |-1-|-2a-|-3-|-4-|-5-|-6-|
3903 * |--1--|-2b-|...(same as above)
3905 * Alternative situation. Happens on files where extents got split.
3907 * |-----------7-----------|-6-|
3909 * Alternative situation. Happens on files that grew larger.
3912 * Nothing follows after 8.
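 *
 * Worked example (editor's illustration): if L covers file range
 * [100k, 200k) and the first right extent found is 2a covering
 * [80k, 140k), then key.offset (80k) < ekey->offset (100k), so
 * right_offset is increased by 20k before the comparison; for the
 * extents that follow, left_offset_fixed is increased instead.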
3915 key.objectid = ekey->objectid;
3916 key.type = BTRFS_EXTENT_DATA_KEY;
3917 key.offset = ekey->offset;
3918 ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
3927 * Handle special case where the right side has no extents at all.
3929 eb = path->nodes[0];
3930 slot = path->slots[0];
3931 btrfs_item_key_to_cpu(eb, &found_key, slot);
3932 if (found_key.objectid != key.objectid ||
3933 found_key.type != key.type) {
3934 /* If we're a hole then just pretend nothing changed */
3935 ret = (left_disknr) ? 0 : 1;
3940 * We're now on 2a, 2b or 7.
3943 while (key.offset < ekey->offset + left_len) {
3944 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
3945 right_type = btrfs_file_extent_type(eb, ei);
3946 if (right_type != BTRFS_FILE_EXTENT_REG) {
3951 right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
3952 right_len = btrfs_file_extent_num_bytes(eb, ei);
3953 right_offset = btrfs_file_extent_offset(eb, ei);
3954 right_gen = btrfs_file_extent_generation(eb, ei);
3957 * Are we at extent 8? If yes, we know the extent is changed.
3958 * This may only happen on the first iteration.
3960 if (found_key.offset + right_len <= ekey->offset) {
3961 /* If we're a hole just pretend nothing changed */
3962 ret = (left_disknr) ? 0 : 1;
3966 left_offset_fixed = left_offset;
3967 if (key.offset < ekey->offset) {
3968 /* Fix the right offset for 2a and 7. */
3969 right_offset += ekey->offset - key.offset;
3971 /* Fix the left offset for all behind 2a and 2b */
3972 left_offset_fixed += key.offset - ekey->offset;
3976 * Check if we have the same extent.
3978 if (left_disknr != right_disknr ||
3979 left_offset_fixed != right_offset ||
3980 left_gen != right_gen) {
3986 * Go to the next extent.
3988 ret = btrfs_next_item(sctx->parent_root, path);
3992 eb = path->nodes[0];
3993 slot = path->slots[0];
3994 btrfs_item_key_to_cpu(eb, &found_key, slot);
3996 if (ret || found_key.objectid != key.objectid ||
3997 found_key.type != key.type) {
3998 key.offset += right_len;
4001 if (found_key.offset != key.offset + right_len) {
4009 * We're now behind the left extent (treat as unchanged) or at the end
4010 * of the right side (treat as changed).
4012 if (key.offset >= ekey->offset + left_len)
4019 btrfs_free_path(path);
4023 static int get_last_extent(struct send_ctx *sctx, u64 offset)
4025 struct btrfs_path *path;
4026 struct btrfs_root *root = sctx->send_root;
4027 struct btrfs_file_extent_item *fi;
4028 struct btrfs_key key;
4033 path = alloc_path_for_send();
4037 sctx->cur_inode_last_extent = 0;
4039 key.objectid = sctx->cur_ino;
4040 key.type = BTRFS_EXTENT_DATA_KEY;
4041 key.offset = offset;
4042 ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
4046 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
4047 if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
4050 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
4051 struct btrfs_file_extent_item);
4052 type = btrfs_file_extent_type(path->nodes[0], fi);
4053 if (type == BTRFS_FILE_EXTENT_INLINE) {
4054 u64 size = btrfs_file_extent_inline_len(path->nodes[0], fi);
4055 extent_end = ALIGN(key.offset + size,
4056 sctx->send_root->sectorsize);
4058 extent_end = key.offset +
4059 btrfs_file_extent_num_bytes(path->nodes[0], fi);
4061 sctx->cur_inode_last_extent = extent_end;
4063 btrfs_free_path(path);
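/*
 * Editor's illustration: for an inline extent of 100 bytes at key.offset 0
 * on a filesystem with a 4K sectorsize, extent_end = ALIGN(0 + 100, 4096) =
 * 4096; for a regular 8K extent at offset 4096, extent_end = 4096 + 8192 =
 * 12288.
 */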
4067 static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
4068 struct btrfs_key *key)
4070 struct btrfs_file_extent_item *fi;
4075 if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
4078 if (sctx->cur_inode_last_extent == (u64)-1) {
4079 ret = get_last_extent(sctx, key->offset - 1);
4084 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
4085 struct btrfs_file_extent_item);
4086 type = btrfs_file_extent_type(path->nodes[0], fi);
4087 if (type == BTRFS_FILE_EXTENT_INLINE) {
4088 u64 size = btrfs_file_extent_inline_len(path->nodes[0], fi);
4089 extent_end = ALIGN(key->offset + size,
4090 sctx->send_root->sectorsize);
4092 extent_end = key->offset +
4093 btrfs_file_extent_num_bytes(path->nodes[0], fi);
4095 if (sctx->cur_inode_last_extent < key->offset)
4096 ret = send_hole(sctx, key->offset);
4097 sctx->cur_inode_last_extent = extent_end;
4101 static int process_extent(struct send_ctx *sctx,
4102 struct btrfs_path *path,
4103 struct btrfs_key *key)
4105 struct clone_root *found_clone = NULL;
4108 if (S_ISLNK(sctx->cur_inode_mode))
4111 if (sctx->parent_root && !sctx->cur_inode_new) {
4112 ret = is_extent_unchanged(sctx, path, key);
4120 struct btrfs_file_extent_item *ei;
4123 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
4124 struct btrfs_file_extent_item);
4125 type = btrfs_file_extent_type(path->nodes[0], ei);
4126 if (type == BTRFS_FILE_EXTENT_PREALLOC ||
4127 type == BTRFS_FILE_EXTENT_REG) {
4129 * The send spec does not have a prealloc command yet,
4130 * so just leave a hole for prealloc'ed extents until
4131 * we have enough commands queued up to justify rev'ing
4132 * the send spec.
4134 if (type == BTRFS_FILE_EXTENT_PREALLOC) {
4139 /* Have a hole, just skip it. */
4140 if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
4147 ret = find_extent_clone(sctx, path, key->objectid, key->offset,
4148 sctx->cur_inode_size, &found_clone);
4149 if (ret != -ENOENT && ret < 0)
4152 ret = send_write_or_clone(sctx, path, key, found_clone);
4156 ret = maybe_send_hole(sctx, path, key);
4161 static int process_all_extents(struct send_ctx *sctx)
4164 struct btrfs_root *root;
4165 struct btrfs_path *path;
4166 struct btrfs_key key;
4167 struct btrfs_key found_key;
4168 struct extent_buffer *eb;
4171 root = sctx->send_root;
4172 path = alloc_path_for_send();
4176 key.objectid = sctx->cmp_key->objectid;
4177 key.type = BTRFS_EXTENT_DATA_KEY;
4180 ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
4188 eb = path->nodes[0];
4189 slot = path->slots[0];
4190 btrfs_item_key_to_cpu(eb, &found_key, slot);
4192 if (found_key.objectid != key.objectid ||
4193 found_key.type != key.type) {
4198 ret = process_extent(sctx, path, &found_key);
4202 btrfs_release_path(path);
4203 key.offset = found_key.offset + 1;
4207 btrfs_free_path(path);
4211 static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end)
4215 if (sctx->cur_ino == 0)
4217 if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
4218 sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
4220 if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
4223 ret = process_recorded_refs(sctx);
4228 * We have processed the refs and thus need to advance send_progress.
4229 * Now, calls to get_cur_xxx will take the updated refs of the current
4230 * inode into account.
4232 sctx->send_progress = sctx->cur_ino + 1;
4238 static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
4250 ret = process_recorded_refs_if_needed(sctx, at_end);
4254 if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
4256 if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
4259 ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
4260 &left_mode, &left_uid, &left_gid, NULL);
4264 if (!sctx->parent_root || sctx->cur_inode_new) {
4266 if (!S_ISLNK(sctx->cur_inode_mode))
4269 ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
4270 NULL, NULL, &right_mode, &right_uid,
4275 if (left_uid != right_uid || left_gid != right_gid)
4277 if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
4281 if (S_ISREG(sctx->cur_inode_mode)) {
4282 if (need_send_hole(sctx)) {
4283 if (sctx->cur_inode_last_extent == (u64)-1) {
4284 ret = get_last_extent(sctx, (u64)-1);
4288 if (sctx->cur_inode_last_extent <
4289 sctx->cur_inode_size) {
4290 ret = send_hole(sctx, sctx->cur_inode_size);
4295 ret = send_truncate(sctx, sctx->cur_ino, sctx->cur_inode_gen,
4296 sctx->cur_inode_size);
4302 ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
4303 left_uid, left_gid);
4308 ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
4315 * We need to send the utimes every time, no matter whether anything actually
4316 * changed between the two trees, as we have already made changes to the inode.
4318 ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
4326 static int changed_inode(struct send_ctx *sctx,
4327 enum btrfs_compare_tree_result result)
4330 struct btrfs_key *key = sctx->cmp_key;
4331 struct btrfs_inode_item *left_ii = NULL;
4332 struct btrfs_inode_item *right_ii = NULL;
4336 sctx->cur_ino = key->objectid;
4337 sctx->cur_inode_new_gen = 0;
4338 sctx->cur_inode_last_extent = (u64)-1;
4341 * Set send_progress to current inode. This will tell all get_cur_xxx
4342 * functions that the current inode's refs are not updated yet. Later,
4343 * when process_recorded_refs is finished, it is set to cur_ino + 1.
4345 sctx->send_progress = sctx->cur_ino;
4347 if (result == BTRFS_COMPARE_TREE_NEW ||
4348 result == BTRFS_COMPARE_TREE_CHANGED) {
4349 left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
4350 sctx->left_path->slots[0],
4351 struct btrfs_inode_item);
4352 left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
4355 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
4356 sctx->right_path->slots[0],
4357 struct btrfs_inode_item);
4358 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
4361 if (result == BTRFS_COMPARE_TREE_CHANGED) {
4362 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
4363 sctx->right_path->slots[0],
4364 struct btrfs_inode_item);
4366 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
4370 * The cur_ino = root dir case is special here. We can't treat
4371 * the inode as deleted+reused because it would generate a
4372 * stream that tries to delete/mkdir the root dir.
4374 if (left_gen != right_gen &&
4375 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
4376 sctx->cur_inode_new_gen = 1;
4379 if (result == BTRFS_COMPARE_TREE_NEW) {
4380 sctx->cur_inode_gen = left_gen;
4381 sctx->cur_inode_new = 1;
4382 sctx->cur_inode_deleted = 0;
4383 sctx->cur_inode_size = btrfs_inode_size(
4384 sctx->left_path->nodes[0], left_ii);
4385 sctx->cur_inode_mode = btrfs_inode_mode(
4386 sctx->left_path->nodes[0], left_ii);
4387 if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
4388 ret = send_create_inode_if_needed(sctx);
4389 } else if (result == BTRFS_COMPARE_TREE_DELETED) {
4390 sctx->cur_inode_gen = right_gen;
4391 sctx->cur_inode_new = 0;
4392 sctx->cur_inode_deleted = 1;
4393 sctx->cur_inode_size = btrfs_inode_size(
4394 sctx->right_path->nodes[0], right_ii);
4395 sctx->cur_inode_mode = btrfs_inode_mode(
4396 sctx->right_path->nodes[0], right_ii);
4397 } else if (result == BTRFS_COMPARE_TREE_CHANGED) {
4399 * We need some special handling in case the inode was
4400 * reported as changed with a changed generation number. This
4401 * means that the original inode was deleted and a new inode
4402 * reused the same inum. So we have to treat the old inode as
4403 * deleted and the new one as new.
4405 if (sctx->cur_inode_new_gen) {
4407 * First, process the inode as if it was deleted.
4409 sctx->cur_inode_gen = right_gen;
4410 sctx->cur_inode_new = 0;
4411 sctx->cur_inode_deleted = 1;
4412 sctx->cur_inode_size = btrfs_inode_size(
4413 sctx->right_path->nodes[0], right_ii);
4414 sctx->cur_inode_mode = btrfs_inode_mode(
4415 sctx->right_path->nodes[0], right_ii);
4416 ret = process_all_refs(sctx,
4417 BTRFS_COMPARE_TREE_DELETED);
4422 * Now process the inode as if it was new.
4424 sctx->cur_inode_gen = left_gen;
4425 sctx->cur_inode_new = 1;
4426 sctx->cur_inode_deleted = 0;
4427 sctx->cur_inode_size = btrfs_inode_size(
4428 sctx->left_path->nodes[0], left_ii);
4429 sctx->cur_inode_mode = btrfs_inode_mode(
4430 sctx->left_path->nodes[0], left_ii);
4431 ret = send_create_inode_if_needed(sctx);
4435 ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
4439 * Advance send_progress now as we did not get into
4440 * process_recorded_refs_if_needed in the new_gen case.
4442 sctx->send_progress = sctx->cur_ino + 1;
4445 * Now process all extents and xattrs of the inode as if
4446 * they were all new.
4448 ret = process_all_extents(sctx);
4451 ret = process_all_new_xattrs(sctx);
4455 sctx->cur_inode_gen = left_gen;
4456 sctx->cur_inode_new = 0;
4457 sctx->cur_inode_new_gen = 0;
4458 sctx->cur_inode_deleted = 0;
4459 sctx->cur_inode_size = btrfs_inode_size(
4460 sctx->left_path->nodes[0], left_ii);
4461 sctx->cur_inode_mode = btrfs_inode_mode(
4462 sctx->left_path->nodes[0], left_ii);
4471 * We have to process new refs before deleted refs, but compare_trees gives us
4472 * the new and deleted refs mixed. To fix this, we record the new/deleted refs
4473 * first and later process them in process_recorded_refs.
4474 * For the cur_inode_new_gen case, we skip recording completely because
4475 * changed_inode has already initiated processing of the refs. The reason is
4476 * that in this case, compare_tree actually compares the refs of two different
4477 * inodes. To fix this, changed_inode uses process_all_refs to handle all refs
4478 * of the right tree as deleted and all refs of the left tree as new.
4480 static int changed_ref(struct send_ctx *sctx,
4481 enum btrfs_compare_tree_result result)
4485 BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
4487 if (!sctx->cur_inode_new_gen &&
4488 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
4489 if (result == BTRFS_COMPARE_TREE_NEW)
4490 ret = record_new_ref(sctx);
4491 else if (result == BTRFS_COMPARE_TREE_DELETED)
4492 ret = record_deleted_ref(sctx);
4493 else if (result == BTRFS_COMPARE_TREE_CHANGED)
4494 ret = record_changed_ref(sctx);
4501 * Process new/deleted/changed xattrs. We skip processing in the
4502 * cur_inode_new_gen case because changed_inode has already initiated
4503 * processing of the xattrs. The reason is the same as in changed_ref.
4505 static int changed_xattr(struct send_ctx *sctx,
4506 enum btrfs_compare_tree_result result)
4510 BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
4512 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
4513 if (result == BTRFS_COMPARE_TREE_NEW)
4514 ret = process_new_xattr(sctx);
4515 else if (result == BTRFS_COMPARE_TREE_DELETED)
4516 ret = process_deleted_xattr(sctx);
4517 else if (result == BTRFS_COMPARE_TREE_CHANGED)
4518 ret = process_changed_xattr(sctx);
4525 * Process new/deleted/changed extents. We skip processing in the
4526 * cur_inode_new_gen case because changed_inode has already initiated
4527 * processing of the extents. The reason is the same as in changed_ref.
4529 static int changed_extent(struct send_ctx *sctx,
4530 enum btrfs_compare_tree_result result)
4534 BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
4536 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
4537 if (result != BTRFS_COMPARE_TREE_DELETED)
4538 ret = process_extent(sctx, sctx->left_path,
4545 static int dir_changed(struct send_ctx *sctx, u64 dir)
4547 u64 orig_gen, new_gen;
4550 ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
4555 ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
4560 return (orig_gen != new_gen) ? 1 : 0;
4563 static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
4564 struct btrfs_key *key)
4566 struct btrfs_inode_extref *extref;
4567 struct extent_buffer *leaf;
4568 u64 dirid = 0, last_dirid = 0;
4575 /* Easy case, just check this one dirid */
4576 if (key->type == BTRFS_INODE_REF_KEY) {
4577 dirid = key->offset;
4579 ret = dir_changed(sctx, dirid);
4583 leaf = path->nodes[0];
4584 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4585 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4586 while (cur_offset < item_size) {
4587 extref = (struct btrfs_inode_extref *)(ptr +
4589 dirid = btrfs_inode_extref_parent(leaf, extref);
4590 ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
4591 cur_offset += ref_name_len + sizeof(*extref);
4592 if (dirid == last_dirid)
4594 ret = dir_changed(sctx, dirid);
4604 * Updates compare-related fields in sctx and simply forwards to the actual
4605 * changed_xxx functions.
4607 static int changed_cb(struct btrfs_root *left_root,
4608 struct btrfs_root *right_root,
4609 struct btrfs_path *left_path,
4610 struct btrfs_path *right_path,
4611 struct btrfs_key *key,
4612 enum btrfs_compare_tree_result result,
4616 struct send_ctx *sctx = ctx;
4618 if (result == BTRFS_COMPARE_TREE_SAME) {
4619 if (key->type == BTRFS_INODE_REF_KEY ||
4620 key->type == BTRFS_INODE_EXTREF_KEY) {
4621 ret = compare_refs(sctx, left_path, key);
4626 } else if (key->type == BTRFS_EXTENT_DATA_KEY) {
4627 return maybe_send_hole(sctx, left_path, key);
4631 result = BTRFS_COMPARE_TREE_CHANGED;
4635 sctx->left_path = left_path;
4636 sctx->right_path = right_path;
4637 sctx->cmp_key = key;
4639 ret = finish_inode_if_needed(sctx, 0);
4643 /* Ignore non-FS objects */
4644 if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
4645 key->objectid == BTRFS_FREE_SPACE_OBJECTID)
4648 if (key->type == BTRFS_INODE_ITEM_KEY)
4649 ret = changed_inode(sctx, result);
4650 else if (key->type == BTRFS_INODE_REF_KEY ||
4651 key->type == BTRFS_INODE_EXTREF_KEY)
4652 ret = changed_ref(sctx, result);
4653 else if (key->type == BTRFS_XATTR_ITEM_KEY)
4654 ret = changed_xattr(sctx, result);
4655 else if (key->type == BTRFS_EXTENT_DATA_KEY)
4656 ret = changed_extent(sctx, result);
4662 static int full_send_tree(struct send_ctx *sctx)
4665 struct btrfs_trans_handle *trans = NULL;
4666 struct btrfs_root *send_root = sctx->send_root;
4667 struct btrfs_key key;
4668 struct btrfs_key found_key;
4669 struct btrfs_path *path;
4670 struct extent_buffer *eb;
4675 path = alloc_path_for_send();
4679 spin_lock(&send_root->root_item_lock);
4680 start_ctransid = btrfs_root_ctransid(&send_root->root_item);
4681 spin_unlock(&send_root->root_item_lock);
4683 key.objectid = BTRFS_FIRST_FREE_OBJECTID;
4684 key.type = BTRFS_INODE_ITEM_KEY;
4689 * We need to make sure the transaction does not get committed
4690 * while we do anything on commit roots. Join a transaction to prevent this.
4693 trans = btrfs_join_transaction(send_root);
4694 if (IS_ERR(trans)) {
4695 ret = PTR_ERR(trans);
4701 * Make sure the tree has not changed after re-joining. We detect this
4702 * by comparing start_ctransid and ctransid. They should always match.
4704 spin_lock(&send_root->root_item_lock);
4705 ctransid = btrfs_root_ctransid(&send_root->root_item);
4706 spin_unlock(&send_root->root_item_lock);
4708 if (ctransid != start_ctransid) {
4709 WARN(1, KERN_WARNING "btrfs: the root that you're trying to "
4710 "send was modified in between. This is "
4711 "probably a bug.\n");
4716 ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
4724 * When someone wants to commit while we iterate, end the
4725 * joined transaction and rejoin.
4727 if (btrfs_should_end_transaction(trans, send_root)) {
4728 ret = btrfs_end_transaction(trans, send_root);
4732 btrfs_release_path(path);
4736 eb = path->nodes[0];
4737 slot = path->slots[0];
4738 btrfs_item_key_to_cpu(eb, &found_key, slot);
4740 ret = changed_cb(send_root, NULL, path, NULL,
4741 &found_key, BTRFS_COMPARE_TREE_NEW, sctx);
4745 key.objectid = found_key.objectid;
4746 key.type = found_key.type;
4747 key.offset = found_key.offset + 1;
4749 ret = btrfs_next_item(send_root, path);
4759 ret = finish_inode_if_needed(sctx, 1);
4762 btrfs_free_path(path);
4765 ret = btrfs_end_transaction(trans, send_root);
4767 btrfs_end_transaction(trans, send_root);
4772 static int send_subvol(struct send_ctx *sctx)
4776 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
4777 ret = send_header(sctx);
4782 ret = send_subvol_begin(sctx);
4786 if (sctx->parent_root) {
4787 ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root,
4791 ret = finish_inode_if_needed(sctx, 1);
4795 ret = full_send_tree(sctx);
4801 free_recorded_refs(sctx);
4805 long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
4808 struct btrfs_root *send_root;
4809 struct btrfs_root *clone_root;
4810 struct btrfs_fs_info *fs_info;
4811 struct btrfs_ioctl_send_args *arg = NULL;
4812 struct btrfs_key key;
4813 struct send_ctx *sctx = NULL;
4815 u64 *clone_sources_tmp = NULL;
4817 if (!capable(CAP_SYS_ADMIN))
4820 send_root = BTRFS_I(file_inode(mnt_file))->root;
4821 fs_info = send_root->fs_info;
4824 * This is done when we look up the root; it should already be complete
4825 * by the time we get here.
4827 WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE);
4830 * If we just created this root we need to make sure that the orphan
4831 * cleanup has been done and committed, since we search the commit root.
4832 * So check the commit root's transid against our otransid and, if they
4833 * match, commit the transaction to make sure everything is updated.
4835 down_read(&send_root->fs_info->extent_commit_sem);
4836 if (btrfs_header_generation(send_root->commit_root) ==
4837 btrfs_root_otransid(&send_root->root_item)) {
4838 struct btrfs_trans_handle *trans;
4840 up_read(&send_root->fs_info->extent_commit_sem);
4842 trans = btrfs_attach_transaction_barrier(send_root);
4843 if (IS_ERR(trans)) {
4844 if (PTR_ERR(trans) != -ENOENT) {
4845 ret = PTR_ERR(trans);
4848 /* ENOENT means there's no transaction */
4850 ret = btrfs_commit_transaction(trans, send_root);
4855 up_read(&send_root->fs_info->extent_commit_sem);
4858 arg = memdup_user(arg_, sizeof(*arg));
4865 if (!access_ok(VERIFY_READ, arg->clone_sources,
4866 sizeof(*arg->clone_sources) *
4867 arg->clone_sources_count)) {
4872 if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
4877 sctx = kzalloc(sizeof(struct send_ctx), GFP_NOFS);
4883 INIT_LIST_HEAD(&sctx->new_refs);
4884 INIT_LIST_HEAD(&sctx->deleted_refs);
4885 INIT_RADIX_TREE(&sctx->name_cache, GFP_NOFS);
4886 INIT_LIST_HEAD(&sctx->name_cache_list);
4888 sctx->flags = arg->flags;
4890 sctx->send_filp = fget(arg->send_fd);
4891 if (!sctx->send_filp) {
4896 sctx->mnt = mnt_file->f_path.mnt;
4898 sctx->send_root = send_root;
4899 sctx->clone_roots_cnt = arg->clone_sources_count;
4901 sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
4902 sctx->send_buf = vmalloc(sctx->send_max_size);
4903 if (!sctx->send_buf) {
4908 sctx->read_buf = vmalloc(BTRFS_SEND_READ_SIZE);
4909 if (!sctx->read_buf) {
4914 sctx->clone_roots = vzalloc(sizeof(struct clone_root) *
4915 (arg->clone_sources_count + 1));
4916 if (!sctx->clone_roots) {
4921 if (arg->clone_sources_count) {
4922 clone_sources_tmp = vmalloc(arg->clone_sources_count *
4923 sizeof(*arg->clone_sources));
4924 if (!clone_sources_tmp) {
4929 ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
4930 arg->clone_sources_count *
4931 sizeof(*arg->clone_sources));
4937 for (i = 0; i < arg->clone_sources_count; i++) {
4938 key.objectid = clone_sources_tmp[i];
4939 key.type = BTRFS_ROOT_ITEM_KEY;
4940 key.offset = (u64)-1;
4941 clone_root = btrfs_read_fs_root_no_name(fs_info, &key);
4942 if (IS_ERR(clone_root)) {
4943 ret = PTR_ERR(clone_root);
4946 sctx->clone_roots[i].root = clone_root;
4948 vfree(clone_sources_tmp);
4949 clone_sources_tmp = NULL;
4952 if (arg->parent_root) {
4953 key.objectid = arg->parent_root;
4954 key.type = BTRFS_ROOT_ITEM_KEY;
4955 key.offset = (u64)-1;
4956 sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key);
4957 if (IS_ERR(sctx->parent_root)) {
4958 ret = PTR_ERR(sctx->parent_root);
4964 * Clones from send_root are allowed, but only if the clone source
4965 * is behind the current send position. This is checked while searching
4966 * for possible clone sources.
4968 sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root;
4970 /* We do a bsearch later */
4971 sort(sctx->clone_roots, sctx->clone_roots_cnt,
4972 sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
4975 ret = send_subvol(sctx);
4979 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
4980 ret = begin_cmd(sctx, BTRFS_SEND_C_END);
4983 ret = send_cmd(sctx);
4990 vfree(clone_sources_tmp);
4993 if (sctx->send_filp)
4994 fput(sctx->send_filp);
4996 vfree(sctx->clone_roots);
4997 vfree(sctx->send_buf);
4998 vfree(sctx->read_buf);
5000 name_cache_free(sctx);
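/*
 * Editor's illustration (user-space, not part of this file): a minimal
 * caller of this ioctl, assuming <fcntl.h>, <string.h>, <unistd.h>,
 * <sys/ioctl.h> and <linux/btrfs.h> for struct btrfs_ioctl_send_args and
 * BTRFS_IOC_SEND. A full send: no parent root and no clone sources; the
 * stream is written to out_fd. The function name is hypothetical:
 */
static int example_send_subvol(const char *subvol_path, int out_fd)
{
	struct btrfs_ioctl_send_args args;
	int subvol_fd = open(subvol_path, O_RDONLY);
	int ret;

	if (subvol_fd < 0)
		return -1;
	memset(&args, 0, sizeof(args));
	args.send_fd = out_fd;	/* the stream is written here */
	ret = ioctl(subvol_fd, BTRFS_IOC_SEND, &args);
	close(subvol_fd);
	return ret;
}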