/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value, -EINVAL is
 * returned without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}
/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
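/*
 * Illustrative use (added for clarity), mirroring rbd_dev_parent_get()
 * and rbd_dev_parent_put() below:
 *
 *	if (atomic_inc_return_safe(&rbd_dev->parent_ref) > 0) {
 *		... use the parent image ...
 *		if (atomic_dec_return_safe(&rbd_dev->parent_ref) < 0)
 *			rbd_warn(rbd_dev, "parent reference underflow\n");
 *	}
 */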
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64
#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used to ensure DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
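/*
 * Worked example (added for clarity): with a 4-byte int this is
 * (5 * 4) / 2 + 1 = 11 characters, enough for the 10 digits of
 * INT_MAX plus a sign, so "rbd" plus any decimal id fits within
 * DEV_NAME_LEN.
 */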
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the ids in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the ids associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
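/*
 * Example (added for illustration; names hypothetical): mapping
 * "mypool/myimage@mysnap" supplies pool_name, image_name and
 * snap_name, and the corresponding pool_id, image_id and snap_id
 * are then looked up; a parent spec is built the other way around,
 * from ids to names.
 */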
/*
 * An instance of the client.  Multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);
enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};
struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn in image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};
enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};
struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};

#define	for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define	for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define	for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
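/*
 * Illustrative use (added for clarity): walking an image request's
 * object requests with the helpers above, e.g.
 *
 *	struct rbd_obj_request *obj_req;
 *
 *	for_each_obj_request(img_req, obj_req)
 *		dout("obj %p %llu~%llu\n", obj_req,
 *		     obj_req->offset, obj_req->length);
 */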
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;

	char			*header_name;

	struct ceph_file_layout	layout;

	struct ceph_osd_event   *watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* protects updating the header */
	struct rw_semaphore     header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};
/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};
static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);
/*
 * Default to false for now, as single-major requires version 0.75 or
 * newer of the userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);
static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
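/*
 * Worked example (added for clarity): with RBD_SINGLE_MAJOR_PART_SHIFT
 * of 4, dev_id 3 maps to minor 48, and minors 48..63 address rbd3 and
 * its up to 15 partitions; minor_to_rbd_dev_id() maps any of them
 * back to 3.
 */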
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};
static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
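/*
 * Illustrative use (added for clarity): invariants in this file are
 * stated as, e.g.,
 *
 *	rbd_assert(obj_request->which != BAD_WHICH);
 *
 * which logs the failed expression and calls BUG() when RBD_DEBUG is
 * defined, and compiles to nothing otherwise.
 */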
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);
	set_device_ro(bdev, rbd_dev->mapping.read_only);

	return 0;
}
static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}
static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}
/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
/*
 * mount options
 */
enum {
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	/* Boolean args above */
	Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
	{-1, NULL}
};

struct rbd_options {
	bool	read_only;
};

#define RBD_READ_ONLY_DEFAULT	false
static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		rbd_assert(false);
		break;
	}
	return 0;
}
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);
	mutex_unlock(&client_mutex);

	return rbdc;
}
/*
 * Destroy ceph client
 *
 * Caller must hold rbd_client_list_lock.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	size_t size;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	/* Make sure mapping size is consistent with header info */

	if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
		if (rbd_dev->mapping.size != header->image_size)
			rbd_dev->mapping.size = header->image_size;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}
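/*
 * Sketch of the format 1 on-disk layout assumed above (added for
 * clarity; the authoritative definition is struct
 * rbd_image_header_ondisk in rbd_types.h):
 *
 *	header fields (text, object prefix, options, snap_seq, ...)
 *	snaps[snap_count]	array of (id, image_size) pairs
 *	snapshot names		snap_names_len bytes of NUL-separated
 *				names, immediately after the id array
 */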
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}
/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
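/*
 * Worked example (added for clarity): with snapc->snaps holding
 * { 10, 7, 3 } (descending, newest first), searching for id 7
 * returns index 1, while searching for id 5 returns BAD_SNAP_INDEX.
 */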
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}
static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}
static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;
	char *name_format;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	name_format = "%s.%012llx";
	if (rbd_dev->image_format == 2)
		name_format = "%s.%016llx";
	ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		kmem_cache_free(rbd_segment_name_cache, name);
		name = NULL;
	}

	return name;
}

static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}

/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}
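/*
 * Worked example (added for clarity): with an object order of 22
 * (4 MiB objects), image offset 0x500000 (5 MiB) lies in segment 1
 * at segment offset 0x100000 (1 MiB), and a 6 MiB request starting
 * there is clipped by rbd_segment_length() to 3 MiB, the remainder
 * of that object.
 */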
/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}
/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned long flags;
	void *buf;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, iter) {
			if (pos + bv.bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(&bv, &flags);
				memset(buf + remainder, 0,
				       bv.bv_len - remainder);
				flush_dcache_page(bv.bv_page);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv.bv_len;
		}

		chain = chain->bi_next;
	}
}
/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = offset & ~PAGE_MASK;
		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		flush_dcache_page(*page);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bio;

	bio = bio_clone(bio_src, gfpmask);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio_advance(bio, offset);
	bio->bi_iter.bi_size = len;

	return bio;
}
/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_iter.bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_iter.bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
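/*
 * Illustrative use (added for clarity), mirroring rbd_img_request_fill()
 * below: carving per-object clones off a request's bio chain.
 *
 *	unsigned int offset = 0;
 *	struct bio *clone;
 *
 *	clone = bio_chain_clone_range(&bio_list, &offset, length,
 *				      GFP_ATOMIC);
 *
 * On return bio_list and offset have advanced to the first un-cloned
 * byte, so successive calls peel consecutive ranges off the chain.
 */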
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done\n",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     atomic_read(&img_request->kref.refcount));
	kref_get(&img_request->kref);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}
static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}
static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);

	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{

	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear which way is better off hand.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}
/* Caller is responsible for rbd_obj_request_destroy(obj_request) */

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	return wait_for_completion_interruptible(&obj_request->completion);
}
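/*
 * Illustrative sketch (added for clarity; names as used elsewhere in
 * this file): the synchronous pattern these helpers support.
 *
 *	obj_request = rbd_obj_request_create(name, 0, 0,
 *					     OBJ_REQUEST_NODATA);
 *	... set up obj_request->osd_req ...
 *	ret = rbd_obj_request_submit(osdc, obj_request);
 *	if (!ret)
 *		ret = rbd_obj_request_wait(obj_request);
 *	... check obj_request->result, then drop the reference ...
 *	rbd_obj_request_put(obj_request);
 */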
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  An error requires the whole
	 * length of the request to be reported finished with an error
	 * to the block layer.  In each case we update the xferred
	 * count to indicate the whole request was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
	}
	obj_request->xferred = length;
	obj_request_done_set(obj_request);
}
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}
static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	rbd_assert(osd_req->r_num_ops <= CEPH_OSD_MAX_OP);

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to blk_end_request(), which takes an unsigned int.
	 */
	obj_request->xferred = osd_req->r_reply_op_len[0];
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);

	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE);
		/* fall through */
	case CEPH_OSD_OP_WRITE:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu\n",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	u64 snap_id;

	rbd_assert(osd_req != NULL);

	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			NULL, snap_id, NULL);
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	struct ceph_snap_context *snapc;
	struct timespec mtime = CURRENT_TIME;

	rbd_assert(osd_req != NULL);

	snapc = img_request ? img_request->snapc : NULL;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			snapc, CEPH_NOSNAP, &mtime);
}
/*
 * Create an osd request.  A read request has one osd op (read).
 * A write request has either one (watch) or two (hint+write) osd ops.
 * (All rbd data writes are prefixed with an allocation hint op, but
 * technically osd watch is a write request, hence this distinction.)
 */
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					bool write_request,
					unsigned int num_ops,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request)) {
		struct rbd_img_request *img_request = obj_request->img_request;

		rbd_assert(write_request ==
				img_request_write_test(img_request));
		if (write_request)
			snapc = img_request->snapc;
	}

	rbd_assert(num_ops == 1 || (write_request && num_ops == 2));

	/* Allocate and initialize the request, for the num_ops ops */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
					  GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	if (write_request)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

	return osd_req;
}
/*
 * Create a copyup osd request based on the information in the
 * object request supplied.  A copyup request has three osd ops,
 * a copyup method call, a hint op, and a write op.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request));

	/* Allocate and initialize the request, for the three ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 3, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
	ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);

	return osd_req;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}
/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	name = kmalloc(size, GFP_KERNEL);
	if (!name)
		return NULL;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
	if (!obj_request) {
		kfree(name);
		return NULL;
	}

	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}
static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request->object_name);
	obj_request->object_name = NULL;
	kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}
/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow\n");
}
/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * We must get the reference before checking for the overlap to
 * coordinate properly with zeroing the parent overlap in
 * rbd_dev_v2_parent_info() when an image gets flattened.  We
 * drop it again if there is no overlap.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return false;

	counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	if (counter > 0 && rbd_dev->parent_overlap)
		return true;

	/* Image was flattened, but parent is not yet torn down */

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow\n");

	return false;
}
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					bool write_request)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
	if (!img_request)
		return NULL;

	if (write_request) {
		down_read(&rbd_dev->header_rwsem);
		ceph_get_snap_context(rbd_dev->header.snapc);
		up_read(&rbd_dev->header_rwsem);
	}

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (write_request) {
		img_request_write_set(img_request);
		img_request->snapc = rbd_dev->header.snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		write_request ? "write" : "read", offset, length,
		img_request);

	return img_request;
}
static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (img_request_write_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}
static struct rbd_img_request *rbd_parent_request_create(
					struct rbd_obj_request *obj_request,
					u64 img_offset, u64 length)
{
	struct rbd_img_request *parent_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;

	parent_request = rbd_img_request_create(rbd_dev->parent,
						img_offset, length, false);
	if (!parent_request)
		return NULL;

	img_request_child_set(parent_request);
	rbd_obj_request_get(obj_request);
	parent_request->obj_request = obj_request;

	return parent_request;
}
static void rbd_parent_request_destroy(struct kref *kref)
{
	struct rbd_img_request *parent_request;
	struct rbd_obj_request *orig_request;

	parent_request = container_of(kref, struct rbd_img_request, kref);
	orig_request = parent_request->obj_request;

	parent_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	img_request_child_clear(parent_request);

	rbd_img_request_destroy(kref);
}
static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	unsigned int xferred;
	int result;
	bool more;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
	xferred = (unsigned int)obj_request->xferred;
	result = obj_request->result;
	if (result) {
		struct rbd_device *rbd_dev = img_request->rbd_dev;

		rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
			img_request_write_test(img_request) ? "write" : "read",
			obj_request->length, obj_request->img_offset,
			obj_request->offset);
		rbd_warn(rbd_dev, "  result %d xferred %x\n",
			result, xferred);
		if (!img_request->result)
			img_request->result = result;
	}

	/* Image object requests don't own their page array */

	if (obj_request->type == OBJ_REQUEST_PAGES) {
		obj_request->pages = NULL;
		obj_request->page_count = 0;
	}

	if (img_request_child_test(img_request)) {
		rbd_assert(img_request->obj_request != NULL);
		more = obj_request->which < img_request->obj_request_count - 1;
	} else {
		rbd_assert(img_request->rq != NULL);
		more = blk_end_request(img_request->rq, result, xferred);
	}

	return more;
}
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	u32 which = obj_request->which;
	bool more = true;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	rbd_assert(img_request != NULL);
	rbd_assert(img_request->obj_request_count > 0);
	rbd_assert(which != BAD_WHICH);
	rbd_assert(which < img_request->obj_request_count);

	spin_lock_irq(&img_request->completion_lock);
	if (which != img_request->next_completion)
		goto out;

	for_each_obj_request_from(img_request, obj_request) {
		rbd_assert(more);
		rbd_assert(which < img_request->obj_request_count);

		if (!obj_request_done_test(obj_request))
			break;
		more = rbd_img_obj_end_request(obj_request);
		which++;
	}

	rbd_assert(more ^ (which == img_request->obj_request_count));
	img_request->next_completion = which;
out:
	spin_unlock_irq(&img_request->completion_lock);
	rbd_img_request_put(img_request);

	if (!more)
		rbd_img_request_complete(img_request);
}
2159 * Split up an image request into one or more object requests, each
2160 * to a different object. The "type" parameter indicates whether
2161 * "data_desc" is the pointer to the head of a list of bio
2162 * structures, or the base of a page array. In either case this
2163 * function assumes data_desc describes memory sufficient to hold
2164 * all data described by the image request.
2166 static int rbd_img_request_fill(struct rbd_img_request *img_request,
2167 enum obj_request_type type,
2170 struct rbd_device *rbd_dev = img_request->rbd_dev;
2171 struct rbd_obj_request *obj_request = NULL;
2172 struct rbd_obj_request *next_obj_request;
2173 bool write_request = img_request_write_test(img_request);
2174 struct bio *bio_list = NULL;
2175 unsigned int bio_offset = 0;
2176 struct page **pages = NULL;
2181 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2182 (int)type, data_desc);
2184 opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
2185 img_offset = img_request->offset;
2186 resid = img_request->length;
2187 rbd_assert(resid > 0);
2189 if (type == OBJ_REQUEST_BIO) {
2190 bio_list = data_desc;
2191 rbd_assert(img_offset ==
2192 bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
2194 rbd_assert(type == OBJ_REQUEST_PAGES);
2199 struct ceph_osd_request *osd_req;
2200 const char *object_name;
2203 unsigned int which = 0;
2205 object_name = rbd_segment_name(rbd_dev, img_offset);
2208 offset = rbd_segment_offset(rbd_dev, img_offset);
2209 length = rbd_segment_length(rbd_dev, img_offset, resid);
2210 obj_request = rbd_obj_request_create(object_name,
2211 offset, length, type);
2212 /* object request has its own copy of the object name */
2213 rbd_segment_name_free(object_name);
2218 * set obj_request->img_request before creating the
2219 * osd_request so that it gets the right snapc
2221 rbd_img_obj_request_add(img_request, obj_request);
2223 if (type == OBJ_REQUEST_BIO) {
2224 unsigned int clone_size;
2226 rbd_assert(length <= (u64)UINT_MAX);
2227 clone_size = (unsigned int)length;
2228 obj_request->bio_list =
2229 bio_chain_clone_range(&bio_list,
2233 if (!obj_request->bio_list)
2236 unsigned int page_count;
2238 obj_request->pages = pages;
2239 page_count = (u32)calc_pages_for(offset, length);
2240 obj_request->page_count = page_count;
2241 if ((offset + length) & ~PAGE_MASK)
2242 page_count--; /* more on last page */
2243 pages += page_count;
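/*
 * Worked example (editor's note, assuming 4 KiB pages): for
 * offset 1024 and length 8192, calc_pages_for() returns 3, since
 * bytes 1024..9215 touch three pages.  Because offset + length is
 * not page aligned, the final page is shared with the next object
 * request's data, so the pages cursor advances by only 2 and the
 * next iteration starts on that shared page.
 */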
2246 osd_req = rbd_osd_req_create(rbd_dev, write_request,
2247 (write_request ? 2 : 1),
2248 obj_request);
2249 if (!osd_req)
2250 goto out_unwind;
2251 obj_request->osd_req = osd_req;
2252 obj_request->callback = rbd_img_obj_callback;
2253 rbd_img_request_get(img_request);
2255 if (write_request) {
2256 osd_req_op_alloc_hint_init(osd_req, which,
2257 rbd_obj_bytes(&rbd_dev->header),
2258 rbd_obj_bytes(&rbd_dev->header));
2259 which++;
2262 osd_req_op_extent_init(osd_req, which, opcode, offset, length,
2263 0, 0);
2264 if (type == OBJ_REQUEST_BIO)
2265 osd_req_op_extent_osd_data_bio(osd_req, which,
2266 obj_request->bio_list, length);
2268 osd_req_op_extent_osd_data_pages(osd_req, which,
2269 obj_request->pages, length,
2270 offset & ~PAGE_MASK, false, false);
2272 if (write_request)
2273 rbd_osd_req_format_write(obj_request);
2274 else
2275 rbd_osd_req_format_read(obj_request);
2277 obj_request->img_offset = img_offset;
2279 img_offset += length;
2280 resid -= length;
2283 return 0;
2285 out_unwind:
2286 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2287 rbd_img_obj_request_del(img_request, obj_request);
2289 return -ENOMEM;
2293 rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2295 struct rbd_img_request *img_request;
2296 struct rbd_device *rbd_dev;
2297 struct page **pages;
2300 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2301 rbd_assert(obj_request_img_data_test(obj_request));
2302 img_request = obj_request->img_request;
2303 rbd_assert(img_request);
2305 rbd_dev = img_request->rbd_dev;
2306 rbd_assert(rbd_dev);
2308 pages = obj_request->copyup_pages;
2309 rbd_assert(pages != NULL);
2310 obj_request->copyup_pages = NULL;
2311 page_count = obj_request->copyup_page_count;
2312 rbd_assert(page_count);
2313 obj_request->copyup_page_count = 0;
2314 ceph_release_page_vector(pages, page_count);
2317 * We want the transfer count to reflect the size of the
2318 * original write request. There is no such thing as a
2319 * successful short write, so if the request was successful
2320 * we can just set it to the originally-requested length.
2322 if (!obj_request->result)
2323 obj_request->xferred = obj_request->length;
2325 /* Finish up with the normal image object callback */
2327 rbd_img_obj_callback(obj_request);
2331 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2333 struct rbd_obj_request *orig_request;
2334 struct ceph_osd_request *osd_req;
2335 struct ceph_osd_client *osdc;
2336 struct rbd_device *rbd_dev;
2337 struct page **pages;
2344 rbd_assert(img_request_child_test(img_request));
2346 /* First get what we need from the image request */
2348 pages = img_request->copyup_pages;
2349 rbd_assert(pages != NULL);
2350 img_request->copyup_pages = NULL;
2351 page_count = img_request->copyup_page_count;
2352 rbd_assert(page_count);
2353 img_request->copyup_page_count = 0;
2355 orig_request = img_request->obj_request;
2356 rbd_assert(orig_request != NULL);
2357 rbd_assert(obj_request_type_valid(orig_request->type));
2358 img_result = img_request->result;
2359 parent_length = img_request->length;
2360 rbd_assert(parent_length == img_request->xferred);
2361 rbd_img_request_put(img_request);
2363 rbd_assert(orig_request->img_request);
2364 rbd_dev = orig_request->img_request->rbd_dev;
2365 rbd_assert(rbd_dev);
2368 * If the overlap has become 0 (most likely because the
2369 * image has been flattened) we need to free the pages
2370 * and re-submit the original write request.
2372 if (!rbd_dev->parent_overlap) {
2373 struct ceph_osd_client *osdc;
2375 ceph_release_page_vector(pages, page_count);
2376 osdc = &rbd_dev->rbd_client->client->osdc;
2377 img_result = rbd_obj_request_submit(osdc, orig_request);
2386 * The original osd request is of no use to us any more.
2387 * We need a new one that can hold the three ops in a copyup
2388 * request. Allocate the new copyup osd request for the
2389 * original request, and release the old one.
2391 img_result = -ENOMEM;
2392 osd_req = rbd_osd_req_create_copyup(orig_request);
2395 rbd_osd_req_destroy(orig_request->osd_req);
2396 orig_request->osd_req = osd_req;
2397 orig_request->copyup_pages = pages;
2398 orig_request->copyup_page_count = page_count;
2400 /* Initialize the copyup op */
2402 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2403 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2406 /* Then the hint op */
2408 osd_req_op_alloc_hint_init(osd_req, 1, rbd_obj_bytes(&rbd_dev->header),
2409 rbd_obj_bytes(&rbd_dev->header));
2411 /* And the original write request op */
2413 offset = orig_request->offset;
2414 length = orig_request->length;
2415 osd_req_op_extent_init(osd_req, 2, CEPH_OSD_OP_WRITE,
2416 offset, length, 0, 0);
2417 if (orig_request->type == OBJ_REQUEST_BIO)
2418 osd_req_op_extent_osd_data_bio(osd_req, 2,
2419 orig_request->bio_list, length);
2421 osd_req_op_extent_osd_data_pages(osd_req, 2,
2422 orig_request->pages, length,
2423 offset & ~PAGE_MASK, false, false);
2425 rbd_osd_req_format_write(orig_request);
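/*
 * Editor's note: the request built above carries three ops that the
 * OSD executes as a unit on the target object:
 *     op 0: CEPH_OSD_OP_CALL "rbd.copyup", carrying the parent data
 *     op 1: the allocation hint (object size)
 *     op 2: CEPH_OSD_OP_WRITE of the original request's data
 * The copyup method writes the parent data only if the target object
 * does not already contain data, so a write that created the object
 * in the meantime is not clobbered.
 */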
2427 /* All set, send it off. */
2429 orig_request->callback = rbd_img_obj_copyup_callback;
2430 osdc = &rbd_dev->rbd_client->client->osdc;
2431 img_result = rbd_obj_request_submit(osdc, orig_request);
2435 /* Record the error code and complete the request */
2437 orig_request->result = img_result;
2438 orig_request->xferred = 0;
2439 obj_request_done_set(orig_request);
2440 rbd_obj_request_complete(orig_request);
2444 * Read from the parent image the range of data that covers the
2445 * entire target of the given object request. This is used for
2446 * satisfying a layered image write request when the target of an
2447 * object request from the image request does not exist.
2449 * A page array big enough to hold the returned data is allocated
2450 * and supplied to rbd_img_request_fill() as the "data descriptor."
2451 * When the read completes, this page array will be transferred to
2452 * the original object request for the copyup operation.
2454 * If an error occurs, record it as the result of the original
2455 * object request and mark it done so it gets completed.
2457 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2459 struct rbd_img_request *img_request = NULL;
2460 struct rbd_img_request *parent_request = NULL;
2461 struct rbd_device *rbd_dev;
2464 struct page **pages = NULL;
2468 rbd_assert(obj_request_img_data_test(obj_request));
2469 rbd_assert(obj_request_type_valid(obj_request->type));
2471 img_request = obj_request->img_request;
2472 rbd_assert(img_request != NULL);
2473 rbd_dev = img_request->rbd_dev;
2474 rbd_assert(rbd_dev->parent != NULL);
2477 * Determine the byte range covered by the object in the
2478 * child image to which the original request was to be sent.
2480 img_offset = obj_request->img_offset - obj_request->offset;
2481 length = (u64)1 << rbd_dev->header.obj_order;
2484 * There is no defined parent data beyond the parent
2485 * overlap, so limit what we read at that boundary if
2486 * necessary.
2488 if (img_offset + length > rbd_dev->parent_overlap) {
2489 rbd_assert(img_offset < rbd_dev->parent_overlap);
2490 length = rbd_dev->parent_overlap - img_offset;
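/*
 * Worked example (editor's note): with the default object order of
 * 22, an object covers 4 MiB.  If this object maps image bytes
 * [4 MiB, 8 MiB) but parent_overlap is 6 MiB, only the first 2 MiB
 * are read from the parent; beyond the overlap the clone's data is
 * not backed by the parent at all.
 */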
2494 * Allocate a page array big enough to receive the data read
2495 * from the parent.
2497 page_count = (u32)calc_pages_for(0, length);
2498 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2499 if (IS_ERR(pages)) {
2500 result = PTR_ERR(pages);
2506 parent_request = rbd_parent_request_create(obj_request,
2507 img_offset, length);
2508 if (!parent_request)
2509 goto out_err;
2511 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2514 parent_request->copyup_pages = pages;
2515 parent_request->copyup_page_count = page_count;
2517 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2518 result = rbd_img_request_submit(parent_request);
2519 if (!result)
2520 return 0;
2522 parent_request->copyup_pages = NULL;
2523 parent_request->copyup_page_count = 0;
2524 parent_request->obj_request = NULL;
2525 rbd_obj_request_put(obj_request);
2526 out_err:
2527 if (pages)
2528 ceph_release_page_vector(pages, page_count);
2529 if (parent_request)
2530 rbd_img_request_put(parent_request);
2531 obj_request->result = result;
2532 obj_request->xferred = 0;
2533 obj_request_done_set(obj_request);
2538 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2540 struct rbd_obj_request *orig_request;
2541 struct rbd_device *rbd_dev;
2544 rbd_assert(!obj_request_img_data_test(obj_request));
2547 * All we need from the object request is the original
2548 * request and the result of the STAT op. Grab those, then
2549 * we're done with the request.
2551 orig_request = obj_request->obj_request;
2552 obj_request->obj_request = NULL;
2553 rbd_obj_request_put(orig_request);
2554 rbd_assert(orig_request);
2555 rbd_assert(orig_request->img_request);
2557 result = obj_request->result;
2558 obj_request->result = 0;
2560 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2561 obj_request, orig_request, result,
2562 obj_request->xferred, obj_request->length);
2563 rbd_obj_request_put(obj_request);
2566 * If the overlap has become 0 (most likely because the
2567 * image has been flattened) we just need to re-submit
2568 * the original write request.
2570 rbd_dev = orig_request->img_request->rbd_dev;
2571 if (!rbd_dev->parent_overlap) {
2572 struct ceph_osd_client *osdc;
2574 osdc = &rbd_dev->rbd_client->client->osdc;
2575 result = rbd_obj_request_submit(osdc, orig_request);
2581 * Our only purpose here is to determine whether the object
2582 * exists, and we don't want to treat the non-existence as
2583 * an error. If something else comes back, transfer the
2584 * error to the original request and complete it now.
2587 obj_request_existence_set(orig_request, true);
2588 } else if (result == -ENOENT) {
2589 obj_request_existence_set(orig_request, false);
2590 } else if (result) {
2591 orig_request->result = result;
2596 * Resubmit the original request now that we have recorded
2597 * whether the target object exists.
2599 orig_request->result = rbd_img_obj_request_submit(orig_request);
2601 if (orig_request->result)
2602 rbd_obj_request_complete(orig_request);
2605 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2607 struct rbd_obj_request *stat_request;
2608 struct rbd_device *rbd_dev;
2609 struct ceph_osd_client *osdc;
2610 struct page **pages = NULL;
2616 * The response data for a STAT call consists of:
2617 *     le64 length;
2618 *     struct {
2619 *         le32 tv_sec;
2620 *         le32 tv_nsec;
2621 *     } mtime;
2623 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2624 page_count = (u32)calc_pages_for(0, size);
2625 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2626 if (IS_ERR(pages))
2627 return PTR_ERR(pages);
2630 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2635 rbd_obj_request_get(obj_request);
2636 stat_request->obj_request = obj_request;
2637 stat_request->pages = pages;
2638 stat_request->page_count = page_count;
2640 rbd_assert(obj_request->img_request);
2641 rbd_dev = obj_request->img_request->rbd_dev;
2642 stat_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
2643 stat_request);
2644 if (!stat_request->osd_req)
2645 goto out;
2646 stat_request->callback = rbd_img_obj_exists_callback;
2648 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2649 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2651 rbd_osd_req_format_read(stat_request);
2653 osdc = &rbd_dev->rbd_client->client->osdc;
2654 ret = rbd_obj_request_submit(osdc, stat_request);
2657 rbd_obj_request_put(obj_request);
2662 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2664 struct rbd_img_request *img_request;
2665 struct rbd_device *rbd_dev;
2666 bool known;
2668 rbd_assert(obj_request_img_data_test(obj_request));
2670 img_request = obj_request->img_request;
2671 rbd_assert(img_request);
2672 rbd_dev = img_request->rbd_dev;
2675 * Only writes to layered images need special handling.
2676 * Reads and non-layered writes are simple object requests.
2677 * Layered writes that start beyond the end of the overlap
2678 * with the parent have no parent data, so they too are
2679 * simple object requests. Finally, if the target object is
2680 * known to already exist, its parent data has already been
2681 * copied, so a write to the object can also be handled as a
2682 * simple object request.
2684 if (!img_request_write_test(img_request) ||
2685 !img_request_layered_test(img_request) ||
2686 rbd_dev->parent_overlap <= obj_request->img_offset ||
2687 ((known = obj_request_known_test(obj_request)) &&
2688 obj_request_exists_test(obj_request))) {
2690 struct rbd_device *rbd_dev;
2691 struct ceph_osd_client *osdc;
2693 rbd_dev = obj_request->img_request->rbd_dev;
2694 osdc = &rbd_dev->rbd_client->client->osdc;
2696 return rbd_obj_request_submit(osdc, obj_request);
2700 * It's a layered write. The target object might exist but
2701 * we may not know that yet. If we know it doesn't exist,
2702 * start by reading the data for the full target object from
2703 * the parent so we can use it for a copyup to the target.
2705 if (known)
2706 return rbd_img_obj_parent_read_full(obj_request);
2708 /* We don't know whether the target exists. Go find out. */
2710 return rbd_img_obj_exists_submit(obj_request);
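/*
 * Editor's summary of the dispatch above:
 *   - a read, a non-layered write, a write wholly beyond the parent
 *     overlap, or a write to an object known to exist goes out as a
 *     plain object request;
 *   - a layered write to an object known not to exist first reads
 *     the covered range from the parent (the copyup path);
 *   - a layered write with unknown existence issues a STAT and is
 *     resubmitted from rbd_img_obj_exists_callback().
 */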
2713 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2715 struct rbd_obj_request *obj_request;
2716 struct rbd_obj_request *next_obj_request;
2718 dout("%s: img %p\n", __func__, img_request);
2719 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2720 int ret;
2722 ret = rbd_img_obj_request_submit(obj_request);
2723 if (ret)
2724 return ret;
2727 return 0;
2730 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2732 struct rbd_obj_request *obj_request;
2733 struct rbd_device *rbd_dev;
2738 rbd_assert(img_request_child_test(img_request));
2740 /* First get what we need from the image request and release it */
2742 obj_request = img_request->obj_request;
2743 img_xferred = img_request->xferred;
2744 img_result = img_request->result;
2745 rbd_img_request_put(img_request);
2748 * If the overlap has become 0 (most likely because the
2749 * image has been flattened) we need to re-submit the
2750 * original request.
2752 rbd_assert(obj_request);
2753 rbd_assert(obj_request->img_request);
2754 rbd_dev = obj_request->img_request->rbd_dev;
2755 if (!rbd_dev->parent_overlap) {
2756 struct ceph_osd_client *osdc;
2758 osdc = &rbd_dev->rbd_client->client->osdc;
2759 img_result = rbd_obj_request_submit(osdc, obj_request);
2764 obj_request->result = img_result;
2765 if (obj_request->result)
2766 goto out;
2769 * We need to zero anything beyond the parent overlap
2770 * boundary. Since rbd_img_obj_request_read_callback()
2771 * will zero anything beyond the end of a short read, an
2772 * easy way to do this is to pretend the data from the
2773 * parent came up short--ending at the overlap boundary.
2775 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2776 obj_end = obj_request->img_offset + obj_request->length;
2777 if (obj_end > rbd_dev->parent_overlap) {
2778 u64 xferred = 0;
2780 if (obj_request->img_offset < rbd_dev->parent_overlap)
2781 xferred = rbd_dev->parent_overlap -
2782 obj_request->img_offset;
2784 obj_request->xferred = min(img_xferred, xferred);
2786 obj_request->xferred = img_xferred;
2788 out:
2789 rbd_img_obj_request_read_callback(obj_request);
2790 rbd_obj_request_complete(obj_request);
2793 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2795 struct rbd_img_request *img_request;
2798 rbd_assert(obj_request_img_data_test(obj_request));
2799 rbd_assert(obj_request->img_request != NULL);
2800 rbd_assert(obj_request->result == (s32) -ENOENT);
2801 rbd_assert(obj_request_type_valid(obj_request->type));
2803 /* rbd_read_finish(obj_request, obj_request->length); */
2804 img_request = rbd_parent_request_create(obj_request,
2805 obj_request->img_offset,
2806 obj_request->length);
2811 if (obj_request->type == OBJ_REQUEST_BIO)
2812 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2813 obj_request->bio_list);
2815 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
2816 obj_request->pages);
2820 img_request->callback = rbd_img_parent_read_callback;
2821 result = rbd_img_request_submit(img_request);
2828 rbd_img_request_put(img_request);
2829 obj_request->result = result;
2830 obj_request->xferred = 0;
2831 obj_request_done_set(obj_request);
2834 static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
2836 struct rbd_obj_request *obj_request;
2837 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2840 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2841 OBJ_REQUEST_NODATA);
2846 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
2848 if (!obj_request->osd_req)
2851 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
2853 rbd_osd_req_format_read(obj_request);
2855 ret = rbd_obj_request_submit(osdc, obj_request);
2858 ret = rbd_obj_request_wait(obj_request);
2860 rbd_obj_request_put(obj_request);
2865 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2867 struct rbd_device *rbd_dev = (struct rbd_device *)data;
2873 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
2874 rbd_dev->header_name, (unsigned long long)notify_id,
2875 (unsigned int)opcode);
2876 ret = rbd_dev_refresh(rbd_dev);
2878 rbd_warn(rbd_dev, "header refresh error (%d)\n", ret);
2880 rbd_obj_notify_ack_sync(rbd_dev, notify_id);
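/*
 * Editor's note: a notification arrives on the header object when
 * another client changes the image (resize, snapshot operations,
 * flatten).  The header is re-read before the notification is
 * acknowledged, so the refresh has happened by the time we ack.
 */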
2884 * Initiate a watch request, synchronously.
2886 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
2888 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2889 struct rbd_obj_request *obj_request;
2892 rbd_assert(!rbd_dev->watch_event);
2893 rbd_assert(!rbd_dev->watch_request);
2895 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
2896 &rbd_dev->watch_event);
2900 rbd_assert(rbd_dev->watch_event);
2902 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2903 OBJ_REQUEST_NODATA);
2909 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, 1,
2911 if (!obj_request->osd_req) {
2916 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
2918 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2919 rbd_dev->watch_event->cookie, 0, 1);
2920 rbd_osd_req_format_write(obj_request);
2922 ret = rbd_obj_request_submit(osdc, obj_request);
2926 ret = rbd_obj_request_wait(obj_request);
2930 ret = obj_request->result;
2935 * A watch request is set to linger, so the underlying osd
2936 * request won't go away until we unregister it. We retain
2937 * a pointer to the object request during that time (in
2938 * rbd_dev->watch_request), so we'll keep a reference to
2939 * it. We'll drop that reference after we've
2940 * unregistered the watch in rbd_dev_header_unwatch_sync().
2942 rbd_dev->watch_request = obj_request;
2947 ceph_osdc_unregister_linger_request(osdc, obj_request->osd_req);
2949 rbd_obj_request_put(obj_request);
2951 ceph_osdc_cancel_event(rbd_dev->watch_event);
2952 rbd_dev->watch_event = NULL;
2958 * Tear down a watch request, synchronously.
2960 static int __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
2962 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2963 struct rbd_obj_request *obj_request;
2966 rbd_assert(rbd_dev->watch_event);
2967 rbd_assert(rbd_dev->watch_request);
2969 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2970 OBJ_REQUEST_NODATA);
2976 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, 1,
2978 if (!obj_request->osd_req) {
2983 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2984 rbd_dev->watch_event->cookie, 0, 0);
2985 rbd_osd_req_format_write(obj_request);
2987 ret = rbd_obj_request_submit(osdc, obj_request);
2991 ret = rbd_obj_request_wait(obj_request);
2995 ret = obj_request->result;
2999 /* We have successfully torn down the watch request */
3001 ceph_osdc_unregister_linger_request(osdc,
3002 rbd_dev->watch_request->osd_req);
3003 rbd_obj_request_put(rbd_dev->watch_request);
3004 rbd_dev->watch_request = NULL;
3007 rbd_obj_request_put(obj_request);
3009 ceph_osdc_cancel_event(rbd_dev->watch_event);
3010 rbd_dev->watch_event = NULL;
3015 static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
3019 ret = __rbd_dev_header_unwatch_sync(rbd_dev);
3021 rbd_warn(rbd_dev, "unable to tear down watch request: %d\n",
3027 * Synchronous osd object method call. Returns the number of bytes
3028 * returned in the inbound buffer, or a negative error code.
3030 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3031 const char *object_name,
3032 const char *class_name,
3033 const char *method_name,
3034 const void *outbound,
3035 size_t outbound_size,
3036 void *inbound,
3037 size_t inbound_size)
3039 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3040 struct rbd_obj_request *obj_request;
3041 struct page **pages;
3046 * Method calls are ultimately read operations. The result
3047 * should be placed into the inbound buffer provided. They
3048 * also supply outbound data--parameters for the object
3049 * method. Currently if this is present it will be a
3050 * snapshot id.
3052 page_count = (u32)calc_pages_for(0, inbound_size);
3053 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3054 if (IS_ERR(pages))
3055 return PTR_ERR(pages);
3058 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
3063 obj_request->pages = pages;
3064 obj_request->page_count = page_count;
3066 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
3068 if (!obj_request->osd_req)
3071 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
3072 class_name, method_name);
3073 if (outbound_size) {
3074 struct ceph_pagelist *pagelist;
3076 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
3080 ceph_pagelist_init(pagelist);
3081 ceph_pagelist_append(pagelist, outbound, outbound_size);
3082 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
3085 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
3086 obj_request->pages, inbound_size,
3088 rbd_osd_req_format_read(obj_request);
3090 ret = rbd_obj_request_submit(osdc, obj_request);
3093 ret = rbd_obj_request_wait(obj_request);
3097 ret = obj_request->result;
3101 rbd_assert(obj_request->xferred < (u64)INT_MAX);
3102 ret = (int)obj_request->xferred;
3103 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
3106 rbd_obj_request_put(obj_request);
3108 ceph_release_page_vector(pages, page_count);
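/*
 * Example use (editor's note; this mirrors rbd_dev_v2_object_prefix()
 * below):
 *
 *	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
 *				"rbd", "get_object_prefix", NULL, 0,
 *				reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
 *
 * A non-negative return value is the number of bytes placed in
 * reply_buf, which the caller then decodes.
 */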
3113 static void rbd_request_fn(struct request_queue *q)
3114 __releases(q->queue_lock) __acquires(q->queue_lock)
3116 struct rbd_device *rbd_dev = q->queuedata;
3117 bool read_only = rbd_dev->mapping.read_only;
3121 while ((rq = blk_fetch_request(q))) {
3122 bool write_request = rq_data_dir(rq) == WRITE;
3123 struct rbd_img_request *img_request;
3127 /* Ignore any non-FS requests that filter through. */
3129 if (rq->cmd_type != REQ_TYPE_FS) {
3130 dout("%s: non-fs request type %d\n", __func__,
3131 (int) rq->cmd_type);
3132 __blk_end_request_all(rq, 0);
3136 /* Ignore/skip any zero-length requests */
3138 offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
3139 length = (u64) blk_rq_bytes(rq);
3142 dout("%s: zero-length request\n", __func__);
3143 __blk_end_request_all(rq, 0);
3147 spin_unlock_irq(q->queue_lock);
3149 /* Disallow writes to a read-only device */
3151 if (write_request) {
3155 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3159 * Quit early if the mapped snapshot no longer
3160 * exists. It's still possible the snapshot will
3161 * have disappeared by the time our request arrives
3162 * at the osd, but there's no sense in sending it if
3163 * we already know.
3165 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3166 dout("request for non-existent snapshot");
3167 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3168 result = -ENXIO;
3169 goto end_request;
3173 if (offset && length > U64_MAX - offset + 1) {
3174 rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
3176 goto end_request; /* Shouldn't happen */
3180 if (offset + length > rbd_dev->mapping.size) {
3181 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
3182 offset, length, rbd_dev->mapping.size);
3187 img_request = rbd_img_request_create(rbd_dev, offset, length,
3192 img_request->rq = rq;
3194 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3197 result = rbd_img_request_submit(img_request);
3199 rbd_img_request_put(img_request);
3201 spin_lock_irq(q->queue_lock);
3203 rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
3204 write_request ? "write" : "read",
3205 length, offset, result);
3207 __blk_end_request_all(rq, result);
3213 * rbd_merge_bvec() is a queue callback. It makes sure that we don't create
3214 * a bio that spans across multiple osd objects. One exception would be a
3215 * single-page bio, which we handle later in bio_chain_clone_range().
3217 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
3218 struct bio_vec *bvec)
3220 struct rbd_device *rbd_dev = q->queuedata;
3221 sector_t sector_offset;
3222 sector_t sectors_per_obj;
3223 sector_t obj_sector_offset;
3227 * Find how far into its rbd object the partition-relative
3228 * bio start sector is, as an offset relative to the enclosing
3229 * device.
3231 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
3232 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
3233 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
3236 * Compute the number of bytes from that offset to the end
3237 * of the object. Account for what's already used by the bio.
3239 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
3240 if (ret > bmd->bi_size)
3241 ret -= bmd->bi_size;
3246 * Don't send back more than was asked for. And if the bio
3247 * was empty, let the whole thing through because: "Note
3248 * that a block device *must* allow a single page to be
3249 * added to an empty bio."
3251 rbd_assert(bvec->bv_len <= PAGE_SIZE);
3252 if (ret > (int) bvec->bv_len || !bmd->bi_size)
3253 ret = (int) bvec->bv_len;
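/*
 * Worked example (editor's note): with 4 MiB objects,
 * sectors_per_obj is 8192.  A bio starting at device sector 12000
 * has obj_sector_offset 12000 & 8191 = 3808, leaving
 * (8192 - 3808) * 512 = 2244608 bytes to the object boundary.
 * After subtracting what the bio already holds, the bvec is
 * accepted whole, truncated, or (if nothing fits) refused with 0.
 */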
3258 static void rbd_free_disk(struct rbd_device *rbd_dev)
3260 struct gendisk *disk = rbd_dev->disk;
3265 rbd_dev->disk = NULL;
3266 if (disk->flags & GENHD_FL_UP) {
3269 blk_cleanup_queue(disk->queue);
3274 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3275 const char *object_name,
3276 u64 offset, u64 length, void *buf)
3279 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3280 struct rbd_obj_request *obj_request;
3281 struct page **pages = NULL;
3286 page_count = (u32) calc_pages_for(offset, length);
3287 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3289 ret = PTR_ERR(pages);
3292 obj_request = rbd_obj_request_create(object_name, offset, length,
3297 obj_request->pages = pages;
3298 obj_request->page_count = page_count;
3300 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, 1,
3302 if (!obj_request->osd_req)
3305 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3306 offset, length, 0, 0);
3307 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
3309 obj_request->length,
3310 obj_request->offset & ~PAGE_MASK,
3312 rbd_osd_req_format_read(obj_request);
3314 ret = rbd_obj_request_submit(osdc, obj_request);
3317 ret = rbd_obj_request_wait(obj_request);
3321 ret = obj_request->result;
3325 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3326 size = (size_t) obj_request->xferred;
3327 ceph_copy_from_page_vector(pages, buf, 0, size);
3328 rbd_assert(size <= (size_t)INT_MAX);
3332 rbd_obj_request_put(obj_request);
3334 ceph_release_page_vector(pages, page_count);
3340 * Read the complete header for the given rbd device. On successful
3341 * return, the rbd_dev->header field will contain up-to-date
3342 * information about the image.
3344 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3346 struct rbd_image_header_ondisk *ondisk = NULL;
3353 * The complete header will include an array of its 64-bit
3354 * snapshot ids, followed by the names of those snapshots as
3355 * a contiguous block of NUL-terminated strings. Note that
3356 * the number of snapshots could change by the time we read
3357 * it in, in which case we re-read it.
3364 size = sizeof (*ondisk);
3365 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3367 ondisk = kmalloc(size, GFP_KERNEL);
3371 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
3375 if ((size_t)ret < size) {
3377 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3381 if (!rbd_dev_ondisk_valid(ondisk)) {
3383 rbd_warn(rbd_dev, "invalid header");
3387 names_size = le64_to_cpu(ondisk->snap_names_len);
3388 want_count = snap_count;
3389 snap_count = le32_to_cpu(ondisk->snap_count);
3390 } while (snap_count != want_count);
3392 ret = rbd_header_from_disk(rbd_dev, ondisk);
3400 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3401 * has disappeared from the (just updated) snapshot context.
3403 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3407 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3410 snap_id = rbd_dev->spec->snap_id;
3411 if (snap_id == CEPH_NOSNAP)
3414 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3415 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3418 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3424 * Don't hold the lock while doing disk operations,
3425 * or lock ordering will conflict with the bdev mutex via:
3426 * rbd_add() -> blkdev_get() -> rbd_open()
3428 spin_lock_irq(&rbd_dev->lock);
3429 removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
3430 spin_unlock_irq(&rbd_dev->lock);
3432 * If the device is being removed, rbd_dev->disk has
3433 * been destroyed, so don't try to update its size
3436 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3437 dout("setting size to %llu sectors", (unsigned long long)size);
3438 set_capacity(rbd_dev->disk, size);
3439 revalidate_disk(rbd_dev->disk);
3443 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3448 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
3449 down_write(&rbd_dev->header_rwsem);
3450 mapping_size = rbd_dev->mapping.size;
3451 if (rbd_dev->image_format == 1)
3452 ret = rbd_dev_v1_header_info(rbd_dev);
3454 ret = rbd_dev_v2_header_info(rbd_dev);
3456 /* If it's a mapped snapshot, validate its EXISTS flag */
3458 rbd_exists_validate(rbd_dev);
3459 up_write(&rbd_dev->header_rwsem);
3461 if (mapping_size != rbd_dev->mapping.size) {
3462 rbd_dev_update_size(rbd_dev);
3468 static int rbd_init_disk(struct rbd_device *rbd_dev)
3470 struct gendisk *disk;
3471 struct request_queue *q;
3474 /* create gendisk info */
3475 disk = alloc_disk(single_major ?
3476 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
3477 RBD_MINORS_PER_MAJOR);
3481 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3483 disk->major = rbd_dev->major;
3484 disk->first_minor = rbd_dev->minor;
3486 disk->flags |= GENHD_FL_EXT_DEVT;
3487 disk->fops = &rbd_bd_ops;
3488 disk->private_data = rbd_dev;
3490 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
3494 /* We use the default size, but let's be explicit about it. */
3495 blk_queue_physical_block_size(q, SECTOR_SIZE);
3497 /* set io sizes to object size */
3498 segment_size = rbd_obj_bytes(&rbd_dev->header);
3499 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3500 blk_queue_max_segment_size(q, segment_size);
3501 blk_queue_io_min(q, segment_size);
3502 blk_queue_io_opt(q, segment_size);
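/*
 * Editor's note: with the default object order of 22 these limits
 * all work out to 4 MiB (max_hw_sectors = 8192 512-byte sectors),
 * so the queue never builds a request larger than one object;
 * rbd_merge_bvec() additionally keeps a bio from straddling an
 * object boundary.
 */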
3504 blk_queue_merge_bvec(q, rbd_merge_bvec);
3507 q->queuedata = rbd_dev;
3509 rbd_dev->disk = disk;
3522 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3524 return container_of(dev, struct rbd_device, dev);
3527 static ssize_t rbd_size_show(struct device *dev,
3528 struct device_attribute *attr, char *buf)
3530 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3532 return sprintf(buf, "%llu\n",
3533 (unsigned long long)rbd_dev->mapping.size);
3537 * Note this shows the features for whatever's mapped, which is not
3538 * necessarily the base image.
3540 static ssize_t rbd_features_show(struct device *dev,
3541 struct device_attribute *attr, char *buf)
3543 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3545 return sprintf(buf, "0x%016llx\n",
3546 (unsigned long long)rbd_dev->mapping.features);
3549 static ssize_t rbd_major_show(struct device *dev,
3550 struct device_attribute *attr, char *buf)
3552 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3555 return sprintf(buf, "%d\n", rbd_dev->major);
3557 return sprintf(buf, "(none)\n");
3560 static ssize_t rbd_minor_show(struct device *dev,
3561 struct device_attribute *attr, char *buf)
3563 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3565 return sprintf(buf, "%d\n", rbd_dev->minor);
3568 static ssize_t rbd_client_id_show(struct device *dev,
3569 struct device_attribute *attr, char *buf)
3571 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3573 return sprintf(buf, "client%lld\n",
3574 ceph_client_id(rbd_dev->rbd_client->client));
3577 static ssize_t rbd_pool_show(struct device *dev,
3578 struct device_attribute *attr, char *buf)
3580 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3582 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3585 static ssize_t rbd_pool_id_show(struct device *dev,
3586 struct device_attribute *attr, char *buf)
3588 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3590 return sprintf(buf, "%llu\n",
3591 (unsigned long long) rbd_dev->spec->pool_id);
3594 static ssize_t rbd_name_show(struct device *dev,
3595 struct device_attribute *attr, char *buf)
3597 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3599 if (rbd_dev->spec->image_name)
3600 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3602 return sprintf(buf, "(unknown)\n");
3605 static ssize_t rbd_image_id_show(struct device *dev,
3606 struct device_attribute *attr, char *buf)
3608 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3610 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3614 * Shows the name of the currently-mapped snapshot (or
3615 * RBD_SNAP_HEAD_NAME for the base image).
3617 static ssize_t rbd_snap_show(struct device *dev,
3618 struct device_attribute *attr,
3621 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3623 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3627 * For an rbd v2 image, shows the pool id, image id, and snapshot id
3628 * for the parent image. If there is no parent, simply shows
3629 * "(no parent image)".
3631 static ssize_t rbd_parent_show(struct device *dev,
3632 struct device_attribute *attr,
3635 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3636 struct rbd_spec *spec = rbd_dev->parent_spec;
3641 return sprintf(buf, "(no parent image)\n");
3643 count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
3644 (unsigned long long) spec->pool_id, spec->pool_name);
3649 count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
3650 spec->image_name ? spec->image_name : "(unknown)");
3655 count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
3656 (unsigned long long) spec->snap_id, spec->snap_name);
3661 count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
3666 return (ssize_t) (bufp - buf);
3669 static ssize_t rbd_image_refresh(struct device *dev,
3670 struct device_attribute *attr,
3674 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3677 ret = rbd_dev_refresh(rbd_dev);
3679 rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret);
3681 return ret < 0 ? ret : size;
3684 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3685 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3686 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3687 static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
3688 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3689 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3690 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3691 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3692 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3693 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3694 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3695 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3697 static struct attribute *rbd_attrs[] = {
3698 &dev_attr_size.attr,
3699 &dev_attr_features.attr,
3700 &dev_attr_major.attr,
3701 &dev_attr_minor.attr,
3702 &dev_attr_client_id.attr,
3703 &dev_attr_pool.attr,
3704 &dev_attr_pool_id.attr,
3705 &dev_attr_name.attr,
3706 &dev_attr_image_id.attr,
3707 &dev_attr_current_snap.attr,
3708 &dev_attr_parent.attr,
3709 &dev_attr_refresh.attr,
3713 static struct attribute_group rbd_attr_group = {
3717 static const struct attribute_group *rbd_attr_groups[] = {
3722 static void rbd_sysfs_dev_release(struct device *dev)
3726 static struct device_type rbd_device_type = {
3728 .groups = rbd_attr_groups,
3729 .release = rbd_sysfs_dev_release,
3732 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3734 kref_get(&spec->kref);
3739 static void rbd_spec_free(struct kref *kref);
3740 static void rbd_spec_put(struct rbd_spec *spec)
3743 kref_put(&spec->kref, rbd_spec_free);
3746 static struct rbd_spec *rbd_spec_alloc(void)
3748 struct rbd_spec *spec;
3750 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
3753 kref_init(&spec->kref);
3758 static void rbd_spec_free(struct kref *kref)
3760 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
3762 kfree(spec->pool_name);
3763 kfree(spec->image_id);
3764 kfree(spec->image_name);
3765 kfree(spec->snap_name);
3769 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
3770 struct rbd_spec *spec)
3772 struct rbd_device *rbd_dev;
3774 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
3778 spin_lock_init(&rbd_dev->lock);
3780 atomic_set(&rbd_dev->parent_ref, 0);
3781 INIT_LIST_HEAD(&rbd_dev->node);
3782 init_rwsem(&rbd_dev->header_rwsem);
3784 rbd_dev->spec = spec;
3785 rbd_dev->rbd_client = rbdc;
3787 /* Initialize the layout used for all rbd requests */
3789 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3790 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
3791 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3792 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
3797 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
3799 rbd_put_client(rbd_dev->rbd_client);
3800 rbd_spec_put(rbd_dev->spec);
3805 * Get the size and object order for an image snapshot, or if
3806 * snap_id is CEPH_NOSNAP, gets this information for the base
3809 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
3810 u8 *order, u64 *snap_size)
3812 __le64 snapid = cpu_to_le64(snap_id);
3813 int ret;
3814 struct {
3815 u8 order;
3816 __le64 size;
3817 } __attribute__ ((packed)) size_buf = { 0 };
3819 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3821 &snapid, sizeof (snapid),
3822 &size_buf, sizeof (size_buf));
3823 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3826 if (ret < sizeof (size_buf))
3830 *order = size_buf.order;
3831 dout(" order %u", (unsigned int)*order);
3833 *snap_size = le64_to_cpu(size_buf.size);
3835 dout(" snap_id 0x%016llx snap_size = %llu\n",
3836 (unsigned long long)snap_id,
3837 (unsigned long long)*snap_size);
3842 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
3844 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
3845 &rbd_dev->header.obj_order,
3846 &rbd_dev->header.image_size);
3849 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
3855 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
3859 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3860 "rbd", "get_object_prefix", NULL, 0,
3861 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
3862 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3867 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
3868 p + ret, NULL, GFP_NOIO);
3871 if (IS_ERR(rbd_dev->header.object_prefix)) {
3872 ret = PTR_ERR(rbd_dev->header.object_prefix);
3873 rbd_dev->header.object_prefix = NULL;
3875 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
3883 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
3886 __le64 snapid = cpu_to_le64(snap_id);
3887 struct {
3888 __le64 features;
3889 __le64 incompat;
3890 } __attribute__ ((packed)) features_buf = { 0 };
3894 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3895 "rbd", "get_features",
3896 &snapid, sizeof (snapid),
3897 &features_buf, sizeof (features_buf));
3898 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3901 if (ret < sizeof (features_buf))
3904 incompat = le64_to_cpu(features_buf.incompat);
3905 if (incompat & ~RBD_FEATURES_SUPPORTED)
3908 *snap_features = le64_to_cpu(features_buf.features);
3910 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
3911 (unsigned long long)snap_id,
3912 (unsigned long long)*snap_features,
3913 (unsigned long long)le64_to_cpu(features_buf.incompat));
3918 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
3920 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
3921 &rbd_dev->header.features);
3924 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3926 struct rbd_spec *parent_spec;
3928 void *reply_buf = NULL;
3938 parent_spec = rbd_spec_alloc();
3942 size = sizeof (__le64) + /* pool_id */
3943 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
3944 sizeof (__le64) + /* snap_id */
3945 sizeof (__le64); /* overlap */
3946 reply_buf = kmalloc(size, GFP_KERNEL);
3952 snapid = cpu_to_le64(CEPH_NOSNAP);
3953 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3954 "rbd", "get_parent",
3955 &snapid, sizeof (snapid),
3957 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3962 end = reply_buf + ret;
3964 ceph_decode_64_safe(&p, end, pool_id, out_err);
3965 if (pool_id == CEPH_NOPOOL) {
3967 * Either the parent never existed, or we have
3968 * record of it but the image got flattened so it no
3969 * longer has a parent. When the parent of a
3970 * layered image disappears we immediately set the
3971 * overlap to 0. The effect of this is that all new
3972 * requests will be treated as if the image had no
3975 if (rbd_dev->parent_overlap) {
3976 rbd_dev->parent_overlap = 0;
3978 rbd_dev_parent_put(rbd_dev);
3979 pr_info("%s: clone image has been flattened\n",
3980 rbd_dev->disk->disk_name);
3983 goto out; /* No parent? No problem. */
3986 /* The ceph file layout needs to fit pool id in 32 bits */
3989 if (pool_id > (u64)U32_MAX) {
3990 rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
3991 (unsigned long long)pool_id, U32_MAX);
3995 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3996 if (IS_ERR(image_id)) {
3997 ret = PTR_ERR(image_id);
4000 ceph_decode_64_safe(&p, end, snap_id, out_err);
4001 ceph_decode_64_safe(&p, end, overlap, out_err);
4004 * The parent won't change (except when the clone is
4005 * flattened, which is handled above). So we only need to
4006 * record the parent spec if we have not already done so.
4008 if (!rbd_dev->parent_spec) {
4009 parent_spec->pool_id = pool_id;
4010 parent_spec->image_id = image_id;
4011 parent_spec->snap_id = snap_id;
4012 rbd_dev->parent_spec = parent_spec;
4013 parent_spec = NULL; /* rbd_dev now owns this */
4017 * We always update the parent overlap. If it's zero we
4018 * treat it specially.
4020 rbd_dev->parent_overlap = overlap;
4024 /* A null parent_spec indicates it's the initial probe */
4028 * The overlap has become zero, so the clone
4029 * must have been resized down to 0 at some
4030 * point. Treat this the same as a flatten.
4032 rbd_dev_parent_put(rbd_dev);
4033 pr_info("%s: clone image now standalone\n",
4034 rbd_dev->disk->disk_name);
4037 * For the initial probe, if we find the
4038 * overlap is zero we just pretend there was
4039 * no parent image.
4041 rbd_warn(rbd_dev, "ignoring parent of "
4042 "clone with overlap 0\n");
4049 rbd_spec_put(parent_spec);
4054 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
4056 struct {
4057 __le64 stripe_unit;
4058 __le64 stripe_count;
4059 } __attribute__ ((packed)) striping_info_buf = { 0 };
4060 size_t size = sizeof (striping_info_buf);
4067 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4068 "rbd", "get_stripe_unit_count", NULL, 0,
4069 (char *)&striping_info_buf, size);
4070 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4077 * We don't actually support the "fancy striping" feature
4078 * (STRIPINGV2) yet, but if the striping sizes are the
4079 * defaults the behavior is the same as before. So find
4080 * out, and only fail if the image has non-default values.
4083 obj_size = (u64)1 << rbd_dev->header.obj_order;
4084 p = &striping_info_buf;
4085 stripe_unit = ceph_decode_64(&p);
4086 if (stripe_unit != obj_size) {
4087 rbd_warn(rbd_dev, "unsupported stripe unit "
4088 "(got %llu want %llu)",
4089 stripe_unit, obj_size);
4092 stripe_count = ceph_decode_64(&p);
4093 if (stripe_count != 1) {
4094 rbd_warn(rbd_dev, "unsupported stripe count "
4095 "(got %llu want 1)", stripe_count);
4098 rbd_dev->header.stripe_unit = stripe_unit;
4099 rbd_dev->header.stripe_count = stripe_count;
4104 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
4106 size_t image_id_size;
4111 void *reply_buf = NULL;
4113 char *image_name = NULL;
4116 rbd_assert(!rbd_dev->spec->image_name);
4118 len = strlen(rbd_dev->spec->image_id);
4119 image_id_size = sizeof (__le32) + len;
4120 image_id = kmalloc(image_id_size, GFP_KERNEL);
4125 end = image_id + image_id_size;
4126 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
4128 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
4129 reply_buf = kmalloc(size, GFP_KERNEL);
4133 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
4134 "rbd", "dir_get_name",
4135 image_id, image_id_size,
4140 end = reply_buf + ret;
4142 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
4143 if (IS_ERR(image_name))
4146 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
4154 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4156 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4157 const char *snap_name;
4160 /* Skip over names until we find the one we are looking for */
4162 snap_name = rbd_dev->header.snap_names;
4163 while (which < snapc->num_snaps) {
4164 if (!strcmp(name, snap_name))
4165 return snapc->snaps[which];
4166 snap_name += strlen(snap_name) + 1;
4172 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4174 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4179 for (which = 0; !found && which < snapc->num_snaps; which++) {
4180 const char *snap_name;
4182 snap_id = snapc->snaps[which];
4183 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4184 if (IS_ERR(snap_name)) {
4185 /* ignore no-longer existing snapshots */
4186 if (PTR_ERR(snap_name) == -ENOENT)
4191 found = !strcmp(name, snap_name);
4194 return found ? snap_id : CEPH_NOSNAP;
4198 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4199 * no snapshot by that name is found, or if an error occurs.
4201 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4203 if (rbd_dev->image_format == 1)
4204 return rbd_v1_snap_id_by_name(rbd_dev, name);
4206 return rbd_v2_snap_id_by_name(rbd_dev, name);
4210 * When an rbd image has a parent image, it is identified by the
4211 * pool, image, and snapshot ids (not names). This function fills
4212 * in the names for those ids. (It's OK if we can't figure out the
4213 * name for an image id, but the pool and snapshot ids should always
4214 * exist and have names.) All names in an rbd spec are dynamically
4215 * allocated.
4217 * When an image being mapped (not a parent) is probed, we have the
4218 * pool name and pool id, image name and image id, and the snapshot
4219 * name. The only thing we're missing is the snapshot id.
4221 static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
4223 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4224 struct rbd_spec *spec = rbd_dev->spec;
4225 const char *pool_name;
4226 const char *image_name;
4227 const char *snap_name;
4231 * An image being mapped will have the pool name (etc.), but
4232 * we need to look up the snapshot id.
4234 if (spec->pool_name) {
4235 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4238 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4239 if (snap_id == CEPH_NOSNAP)
4241 spec->snap_id = snap_id;
4243 spec->snap_id = CEPH_NOSNAP;
4249 /* Get the pool name; we have to make our own copy of this */
4251 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4253 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
4256 pool_name = kstrdup(pool_name, GFP_KERNEL);
4260 /* Fetch the image name; tolerate failure here */
4262 image_name = rbd_dev_image_name(rbd_dev);
4264 rbd_warn(rbd_dev, "unable to get image name");
4266 /* Look up the snapshot name, and make a copy */
4268 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
4269 if (IS_ERR(snap_name)) {
4270 ret = PTR_ERR(snap_name);
4274 spec->pool_name = pool_name;
4275 spec->image_name = image_name;
4276 spec->snap_name = snap_name;
4286 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
4295 struct ceph_snap_context *snapc;
4299 * We'll need room for the seq value (maximum snapshot id),
4300 * snapshot count, and array of that many snapshot ids.
4301 * For now we have a fixed upper limit on the number we're
4302 * prepared to receive.
4304 size = sizeof (__le64) + sizeof (__le32) +
4305 RBD_MAX_SNAP_COUNT * sizeof (__le64);
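/*
 * Editor's note: that is 8 + 4 + 510 * 8 = 4092 bytes, which is why
 * RBD_MAX_SNAP_COUNT is 510: the largest snapshot context reply
 * still fits in a single 4 KiB page.
 */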
4306 reply_buf = kzalloc(size, GFP_KERNEL);
4310 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4311 "rbd", "get_snapcontext", NULL, 0,
4313 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4318 end = reply_buf + ret;
4320 ceph_decode_64_safe(&p, end, seq, out);
4321 ceph_decode_32_safe(&p, end, snap_count, out);
4324 * Make sure the reported number of snapshot ids wouldn't go
4325 * beyond the end of our buffer. But before checking that,
4326 * make sure the computed size of the snapshot context we
4327 * allocate is representable in a size_t.
4329 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4334 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4338 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
4344 for (i = 0; i < snap_count; i++)
4345 snapc->snaps[i] = ceph_decode_64(&p);
4347 ceph_put_snap_context(rbd_dev->header.snapc);
4348 rbd_dev->header.snapc = snapc;
4350 dout(" snap context seq = %llu, snap_count = %u\n",
4351 (unsigned long long)seq, (unsigned int)snap_count);
4358 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
4369 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4370 reply_buf = kmalloc(size, GFP_KERNEL);
4372 return ERR_PTR(-ENOMEM);
4374 snapid = cpu_to_le64(snap_id);
4375 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4376 "rbd", "get_snapshot_name",
4377 &snapid, sizeof (snapid),
4379 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4381 snap_name = ERR_PTR(ret);
4386 end = reply_buf + ret;
4387 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4388 if (IS_ERR(snap_name))
4391 dout(" snap_id 0x%016llx snap_name = %s\n",
4392 (unsigned long long)snap_id, snap_name);
4399 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
4401 bool first_time = rbd_dev->header.object_prefix == NULL;
4404 ret = rbd_dev_v2_image_size(rbd_dev);
4409 ret = rbd_dev_v2_header_onetime(rbd_dev);
4415 * If the image supports layering, get the parent info. We
4416 * need to probe the first time regardless. Thereafter we
4417 * only need to do so if there's a parent, to see if it has
4418 * disappeared due to the mapped image getting flattened.
4420 if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
4421 (first_time || rbd_dev->parent_spec)) {
4424 ret = rbd_dev_v2_parent_info(rbd_dev);
4429 * Print a warning if this is the initial probe and
4430 * the image has a parent. Don't print it if the
4431 * image now being probed is itself a parent. We
4432 * can tell at this point because we won't know its
4433 * pool name yet (just its pool id).
4435 warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
4436 if (first_time && warn)
4437 rbd_warn(rbd_dev, "WARNING: kernel layering "
4438 "is EXPERIMENTAL!");
4441 if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
4442 if (rbd_dev->mapping.size != rbd_dev->header.image_size)
4443 rbd_dev->mapping.size = rbd_dev->header.image_size;
4445 ret = rbd_dev_v2_snap_context(rbd_dev);
4446 dout("rbd_dev_v2_snap_context returned %d\n", ret);
4451 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
4456 dev = &rbd_dev->dev;
4457 dev->bus = &rbd_bus_type;
4458 dev->type = &rbd_device_type;
4459 dev->parent = &rbd_root_dev;
4460 dev->release = rbd_dev_device_release;
4461 dev_set_name(dev, "%d", rbd_dev->dev_id);
4462 ret = device_register(dev);
4467 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
4469 device_unregister(&rbd_dev->dev);
4473 * Get a unique rbd identifier for the given new rbd_dev, and add
4474 * the rbd_dev to the global list.
4476 static int rbd_dev_id_get(struct rbd_device *rbd_dev)
4480 new_dev_id = ida_simple_get(&rbd_dev_id_ida,
4481 0, minor_to_rbd_dev_id(1 << MINORBITS),
4486 rbd_dev->dev_id = new_dev_id;
4488 spin_lock(&rbd_dev_list_lock);
4489 list_add_tail(&rbd_dev->node, &rbd_dev_list);
4490 spin_unlock(&rbd_dev_list_lock);
4492 dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);
4498 * Remove an rbd_dev from the global list, and record that its
4499 * identifier is no longer in use.
4501 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
4503 spin_lock(&rbd_dev_list_lock);
4504 list_del_init(&rbd_dev->node);
4505 spin_unlock(&rbd_dev_list_lock);
4507 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4509 dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
4513 * Skips over white space at *buf, and updates *buf to point to the
4514 * first found non-space character (if any). Returns the length of
4515 * the token (string of non-white space characters) found. Note
4516 * that *buf must be terminated with '\0'.
4518 static inline size_t next_token(const char **buf)
4521 * These are the characters that produce nonzero for
4522 * isspace() in the "C" and "POSIX" locales.
4524 const char *spaces = " \f\n\r\t\v";
4526 *buf += strspn(*buf, spaces); /* Find start of token */
4528 return strcspn(*buf, spaces); /* Return token length */
4532 * Finds the next token in *buf, and if the provided token buffer is
4533 * big enough, copies the found token into it. The result, if
4534 * copied, is guaranteed to be terminated with '\0'. Note that *buf
4535 * must be terminated with '\0' on entry.
4537 * Returns the length of the token found (not including the '\0').
4538 * Return value will be 0 if no token is found, and it will be >=
4539 * token_size if the token would not fit.
4541 * The *buf pointer will be updated to point beyond the end of the
4542 * found token. Note that this occurs even if the token buffer is
4543 * too small to hold it.
4545 static inline size_t copy_token(const char **buf,
4551 len = next_token(buf);
4552 if (len < token_size) {
4553 memcpy(token, *buf, len);
4554 *(token + len) = '\0';
4562 * Finds the next token in *buf, dynamically allocates a buffer big
4563 * enough to hold a copy of it, and copies the token into the new
4564 * buffer. The copy is guaranteed to be terminated with '\0'. Note
4565 * that a duplicate buffer is created even for a zero-length token.
4567 * Returns a pointer to the newly-allocated duplicate, or a null
4568 * pointer if memory for the duplicate was not available. If
4569 * the lenp argument is a non-null pointer, the length of the token
4570 * (not including the '\0') is returned in *lenp.
4572 * If successful, the *buf pointer will be updated to point beyond
4573 * the end of the found token.
4575 * Note: uses GFP_KERNEL for allocation.
4577 static inline char *dup_token(const char **buf, size_t *lenp)
4582 len = next_token(buf);
4583 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
4586 *(dup + len) = '\0';
4596 * Parse the options provided for an "rbd add" (i.e., rbd image
4597 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
4598 * and the data written is passed here via a NUL-terminated buffer.
4599 * Returns 0 if successful or an error code otherwise.
4601 * The information extracted from these options is recorded in
4602 * the other parameters which return dynamically-allocated
4603 * structures:
4604 *  ceph_opts
4605 * The address of a pointer that will refer to a ceph options
4606 * structure. Caller must release the returned pointer using
4607 * ceph_destroy_options() when it is no longer needed.
4608 *  rbd_opts
4609 * Address of an rbd options pointer. Fully initialized by
4610 * this function; caller must release with kfree().
4611 *  rbd_spec
4612 * Address of an rbd image specification pointer. Fully
4613 * initialized by this function based on parsed options.
4614 * Caller must release with rbd_spec_put().
4616 * The options passed take this form:
4617 * <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
4618 * where:
4619 *  <mon_addrs>
4620 * A comma-separated list of one or more monitor addresses.
4621 * A monitor address is an ip address, optionally followed
4622 * by a port number (separated by a colon).
4623 * I.e.: ip1[:port1][,ip2[:port2]...]
4625 * A comma-separated list of ceph and/or rbd options.
4627 * The name of the rados pool containing the rbd image.
4629 * The name of the image in that pool to map.
4631 * An optional snapshot id. If provided, the mapping will
4632 * present data from the image at the time that snapshot was
4633 * created. The image head is used if no snapshot id is
4634 * provided. Snapshot mappings are always read-only.
static int rbd_add_parse_args(const char *buf,
				struct ceph_options **ceph_opts,
				struct rbd_options **opts,
				struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	char *snap_name;
	size_t mon_addrs_size;
	struct rbd_spec *spec = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct ceph_options *copts;
	int ret;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len + 1;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	spec = rbd_spec_alloc();
	if (!spec)
		goto out_mem;

	spec->pool_name = dup_token(&buf, NULL);
	if (!spec->pool_name)
		goto out_mem;
	if (!*spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	spec->image_name = dup_token(&buf, NULL);
	if (!spec->image_name)
		goto out_mem;
	if (!*spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!snap_name)
		goto out_mem;
	*(snap_name + len) = '\0';
	spec->snap_name = snap_name;

	/* Initialize all rbd options to the defaults */

	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;

	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;

	copts = ceph_parse_options(options, mon_addrs,
					mon_addrs + mon_addrs_size - 1,
					parse_rbd_opts_token, rbd_opts);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = rbd_opts;
	*rbd_spec = spec;
	return 0;

out_mem:
	ret = -ENOMEM;
out_err:
	kfree(rbd_opts);
	rbd_spec_put(spec);
	kfree(options);
	return ret;
}
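
/*
 * Hypothetical example of a buffer accepted by the parser above
 * (all values made up):
 *
 *	"1.2.3.4:6789,1.2.3.5:6789 name=admin rbd myimage mysnap"
 *
 * yields mon_addrs "1.2.3.4:6789,1.2.3.5:6789", options "name=admin",
 * pool "rbd", image "myimage" and snapshot name "mysnap"; dropping
 * the trailing token would map the image head instead.
 */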
/*
 * Return pool id (>= 0) or a negative error code.
 */
static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
{
	u64 newest_epoch;
	unsigned long timeout = rbdc->client->options->mount_timeout * HZ;
	int tries = 0;
	int ret;

again:
	ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
	if (ret == -ENOENT && tries++ < 1) {
		ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
					       &newest_epoch);
		if (ret < 0)
			return ret;

		if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
			ceph_monc_request_next_osdmap(&rbdc->client->monc);
			(void) ceph_monc_wait_osdmap(&rbdc->client->monc,
						     newest_epoch, timeout);
			goto again;
		} else {
			/* the osdmap we have is new enough */
			return -ENOENT;
		}
	}

	return ret;
}
/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */

	ret = rbd_obj_method_sync(rbd_dev, object_name,
				"rbd", "get_id", NULL, 0,
				response, RBD_IMAGE_ID_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret > sizeof (__le32)) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						NULL, GFP_NOIO);
		ret = PTR_ERR_OR_ZERO(image_id);
		if (!ret)
			rbd_dev->image_format = 2;
	} else {
		ret = -EINVAL;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	kfree(object_name);
	return ret;
}
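
/*
 * Illustrative note (image name and id hypothetical): for a format 2
 * image "myimage", the id object probed above is named
 * "rbd_id.myimage" (RBD_ID_PREFIX + image name), and its "get_id"
 * class method returns an encoded string such as "1014c9075dc21e",
 * which is recorded as rbd_dev->spec->image_id.  A format 1 image
 * has no id object, hence the -ENOENT fallback.
 */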
/*
 * Undo whatever state changes are made by v1 or v2 header info
 * routines.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header *header;

	/* Drop parent reference unless it's already been done (or none) */

	if (rbd_dev->parent_overlap)
		rbd_dev_parent_put(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/*
	 * Get and check the features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}
	/* No support for crypto and compression type format 2 images */

	return 0;

out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;
	return ret;
}
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
{
	struct rbd_device *parent = NULL;
	struct rbd_spec *parent_spec;
	struct rbd_client *rbdc;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;
	/*
	 * We need to pass a reference to the client and the parent
	 * spec when creating the parent rbd_dev.  Images related by
	 * parent/child relationships always share both.
	 */
	parent_spec = rbd_spec_get(rbd_dev->parent_spec);
	rbdc = __rbd_get_client(rbd_dev->rbd_client);

	ret = -ENOMEM;
	parent = rbd_dev_create(rbdc, parent_spec);
	if (!parent)
		goto out_err;

	ret = rbd_dev_image_probe(parent, false);
	if (ret < 0)
		goto out_err;
	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);
	return 0;

out_err:
	if (parent) {
		rbd_dev_unparent(rbd_dev);
		kfree(rbd_dev->header_name);
		rbd_dev_destroy(parent);
	} else {
		rbd_put_client(rbdc);
		rbd_spec_put(parent_spec);
	}
	return ret;
}
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* Get an id and fill in device name. */

	ret = rbd_dev_id_get(rbd_dev);
	if (ret)
		return ret;

	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Record our major and minor device numbers. */

	if (!single_major) {
		ret = register_blkdev(0, rbd_dev->name);
		if (ret < 0)
			goto err_out_id;
		rbd_dev->major = ret;
		rbd_dev->minor = 0;
	} else {
		rbd_dev->major = rbd_major;
		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
	}

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;
	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_mapping;

	/* Everything's ready.  Announce the disk to the world. */

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	return ret;

err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);
	return ret;
}
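
/*
 * Sketch of the single-major minor-number layout assumed above
 * (dev ids hypothetical): rbd_dev_id_to_minor() effectively shifts
 * the device id left by RBD_SINGLE_MAJOR_PART_SHIFT (4), so each
 * image gets 16 minors (the whole device plus 15 partitions);
 * e.g. dev_id 2 maps to minor 32.
 */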
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	size_t size;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
	else
		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;

	if (rbd_dev->image_format == 1)
		sprintf(rbd_dev->header_name, "%s%s",
			spec->image_name, RBD_SUFFIX);
	else
		sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, spec->image_id);
	return 0;
}
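
/*
 * Illustrative result (names hypothetical), using the prefixes from
 * rbd_types.h: a format 1 image "myimage" gets header object
 * "myimage.rbd" (RBD_SUFFIX), while a format 2 image with id
 * "1014c9075dc21e" gets "rbd_header.1014c9075dc21e"
 * (RBD_HEADER_PREFIX).
 */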
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}
/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
{
	int ret;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;
	rbd_assert(rbd_dev->spec->image_id);
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (mapping) {
		ret = rbd_dev_header_watch_sync(rbd_dev);
		if (ret)
			goto out_header_name;
	}

	if (rbd_dev->image_format == 1)
		ret = rbd_dev_v1_header_info(rbd_dev);
	else
		ret = rbd_dev_v2_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	ret = rbd_dev_spec_update(rbd_dev);
	if (ret)
		goto err_out_probe;

	ret = rbd_dev_probe_parent(rbd_dev);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_name);
	return 0;

err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (mapping)
		rbd_dev_header_unwatch_sync(rbd_dev);
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;
	dout("probe failed, returning %d\n", ret);
	return ret;
}
static ssize_t do_rbd_add(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	bool read_only;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;
	read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
	if (rc < 0)
		goto err_out_client;
	spec->pool_id = (u64)rc;

	/* The ceph file layout needs to fit pool id in 32 bits */

	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)\n",
			(unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		goto err_out_client;
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rc = rbd_dev_image_probe(rbd_dev, true);
	if (rc < 0)
		goto err_out_rbd_dev;

	/* If we are mapping a snapshot it must be marked read-only */

	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		read_only = true;
	rbd_dev->mapping.read_only = read_only;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc) {
		/*
		 * rbd_dev_header_unwatch_sync() can't be moved into
		 * rbd_dev_image_release() without refactoring, see
		 * commit 1f3ef78861ac.
		 */
		rbd_dev_header_unwatch_sync(rbd_dev);
		rbd_dev_image_release(rbd_dev);
		goto err_out_module;
	}

	return count;

err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);
	return (ssize_t)rc;
}
static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_add(bus, buf, count);
}

static ssize_t rbd_add_single_major(struct bus_type *bus,
				    const char *buf,
				    size_t count)
{
	return do_rbd_add(bus, buf, count);
}
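
/*
 * Hypothetical usage from userspace (values made up), matching the
 * argument format parsed by rbd_add_parse_args() above:
 *
 *	# echo "1.2.3.4:6789 name=admin rbd myimage" > /sys/bus/rbd/add
 *
 * On success a new /dev/rbd<id> block device appears; when the
 * single_major module parameter is set, the add_single_major file
 * is used instead of add.
 */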
static void rbd_dev_device_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	rbd_free_disk(rbd_dev);
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_dev_mapping_clear(rbd_dev);
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);
}
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;
		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}
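
/*
 * Sketch of the walk above (chain hypothetical): for a mapping
 * child -> parent -> grandparent, the inner loop advances until
 * "second" is the eldest ancestor (one with no grandparent of its
 * own), so images are released eldest-first; the outer loop then
 * restarts from the mapped device until no parent remains.
 */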
static ssize_t do_rbd_remove(struct bus_type *bus,
			     const char *buf,
			     size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	unsigned long ul;
	bool already = false;
	int ret;

	ret = kstrtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	dev_id = (int)ul;
	if (dev_id != ul)
		return -EINVAL;

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count)
			ret = -EBUSY;
		else
			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
							&rbd_dev->flags);
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret < 0 || already)
		return ret;

	rbd_dev_header_unwatch_sync(rbd_dev);
	/*
	 * flush remaining watch callbacks - these must be complete
	 * before the osd_client is shutdown
	 */
	dout("%s: flushing notifies", __func__);
	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);

	/*
	 * Don't free anything from rbd_dev->disk until after all
	 * notifies are completely processed.  Otherwise
	 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
	 * in a potential use after free of rbd_dev->disk or rbd_dev.
	 */
	rbd_bus_del_dev(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	module_put(THIS_MODULE);

	return count;
}
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_remove(bus, buf, count);
}

static ssize_t rbd_remove_single_major(struct bus_type *bus,
				       const char *buf,
				       size_t count)
{
	return do_rbd_remove(bus, buf, count);
}
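
/*
 * Hypothetical userspace counterpart to the add example earlier:
 *
 *	# echo 0 > /sys/bus/rbd/remove
 *
 * where "0" is the device id from /dev/rbd0.  Removal fails with
 * -EBUSY while the device is open, and with single_major the
 * remove_single_major file is used instead.
 */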
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}
static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = kmem_cache_create("rbd_img_request",
					sizeof (struct rbd_img_request),
					__alignof__(struct rbd_img_request),
					0, NULL);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
					sizeof (struct rbd_obj_request),
					__alignof__(struct rbd_obj_request),
					0, NULL);
	if (!rbd_obj_request_cache)
		goto out_err;

	rbd_assert(!rbd_segment_name_cache);
	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
					CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
	if (rbd_segment_name_cache)
		return 0;
out_err:
	if (rbd_obj_request_cache) {
		kmem_cache_destroy(rbd_obj_request_cache);
		rbd_obj_request_cache = NULL;
	}

	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
	return -ENOMEM;
}
static void rbd_slab_exit(void)
{
	rbd_assert(rbd_segment_name_cache);
	kmem_cache_destroy(rbd_segment_name_cache);
	rbd_segment_name_cache = NULL;

	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}
static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}

	rc = rbd_slab_init();
	if (rc)
		return rc;

	if (single_major) {
		rbd_major = register_blkdev(0, RBD_DRV_NAME);
		if (rbd_major < 0) {
			rc = rbd_major;
			goto err_out_slab;
		}
	}

	rc = rbd_sysfs_init();
	if (rc)
		goto err_out_blkdev;

	if (single_major)
		pr_info("loaded (major %d)\n", rbd_major);
	else
		pr_info("loaded\n");

	return 0;

err_out_blkdev:
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_slab:
	rbd_slab_exit();
	return rc;
}
static void __exit rbd_exit(void)
{
	rbd_sysfs_cleanup();
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);
MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");