3 rbd.c -- Export ceph rados objects as a Linux block device
6 based on drivers/block/osdblk.c:
8 Copyright 2009 Red Hat, Inc.
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program; see the file COPYING. If not, write to
21 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 For usage instructions, please refer to:
27 Documentation/ABI/testing/sysfs-bus-rbd
31 #include <linux/ceph/libceph.h>
32 #include <linux/ceph/osd_client.h>
33 #include <linux/ceph/mon_client.h>
34 #include <linux/ceph/decode.h>
35 #include <linux/parser.h>
36 #include <linux/bsearch.h>
38 #include <linux/kernel.h>
39 #include <linux/device.h>
40 #include <linux/module.h>
41 #include <linux/blk-mq.h>
43 #include <linux/blkdev.h>
44 #include <linux/slab.h>
45 #include <linux/idr.h>
46 #include <linux/workqueue.h>
48 #include "rbd_types.h"
50 #define RBD_DEBUG /* Activate rbd_assert() calls */
53 * The basic unit of block I/O is a sector. It is interpreted in a
54 * number of contexts in Linux (blk, bio, genhd), but the default is
55 * universally 512 bytes. These symbols are just slightly more
56 * meaningful than the bare numbers they represent.
58 #define SECTOR_SHIFT 9
59 #define SECTOR_SIZE (1ULL << SECTOR_SHIFT)
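/*
 * Worked example (illustrative, not from the original source): with
 * SECTOR_SHIFT = 9, SECTOR_SIZE is 512 bytes, so a default 4 MiB rbd
 * object spans (4 << 20) >> SECTOR_SHIFT = 8192 sectors, and an image
 * of size bytes covers size >> SECTOR_SHIFT sectors.
 */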
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value, -EINVAL is
 * returned without updating it.
 */
67 static int atomic_inc_return_safe(atomic_t *v)
71 counter = (unsigned int)__atomic_add_unless(v, 1, 0);
72 if (counter <= (unsigned int)INT_MAX)
80 /* Decrement the counter. Return the resulting value, or -EINVAL */
81 static int atomic_dec_return_safe(atomic_t *v)
85 counter = atomic_dec_return(v);
94 #define RBD_DRV_NAME "rbd"
96 #define RBD_MINORS_PER_MAJOR 256
97 #define RBD_SINGLE_MAJOR_PART_SHIFT 4
99 #define RBD_MAX_PARENT_CHAIN_LEN 16
101 #define RBD_SNAP_DEV_NAME_PREFIX "snap_"
102 #define RBD_MAX_SNAP_NAME_LEN \
103 (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
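/*
 * Worked example (illustrative): with NAME_MAX = 255 and the 5-byte
 * "snap_" prefix (sizeof includes the trailing NUL, hence the "- 1"),
 * RBD_MAX_SNAP_NAME_LEN comes to 255 - 5 = 250 bytes.
 */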
105 #define RBD_MAX_SNAP_COUNT 510 /* allows max snapc to fit in 4KB */
107 #define RBD_SNAP_HEAD_NAME "-"
109 #define BAD_SNAP_INDEX U32_MAX /* invalid index into snap array */
111 /* This allows a single page to hold an image name sent by OSD */
112 #define RBD_IMAGE_NAME_LEN_MAX (PAGE_SIZE - sizeof (__le32) - 1)
113 #define RBD_IMAGE_ID_LEN_MAX 64
115 #define RBD_OBJ_PREFIX_LEN_MAX 64
119 #define RBD_FEATURE_LAYERING (1<<0)
120 #define RBD_FEATURE_STRIPINGV2 (1<<1)
121 #define RBD_FEATURES_ALL \
122 (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)
124 /* Features supported by this (client software) implementation. */
126 #define RBD_FEATURES_SUPPORTED (RBD_FEATURES_ALL)
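/*
 * Illustrative sketch (an assumption about how these bits are meant
 * to be used, not code from the driver): a probe path can refuse to
 * map an image whose feature bits fall outside what we support:
 *
 *	if (features & ~RBD_FEATURES_SUPPORTED)
 *		return -ENXIO;	(error value chosen for illustration)
 */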
129 * An RBD device name will be "rbd#", where the "rbd" comes from
130 * RBD_DRV_NAME above, and # is a unique integer identifier.
131 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
132 * enough to hold all possible device names.
134 #define DEV_NAME_LEN 32
135 #define MAX_INT_FORMAT_WIDTH ((5 * sizeof (int)) / 2 + 1)
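/*
 * Worked example (illustrative): for a 4-byte int, MAX_INT_FORMAT_WIDTH
 * is (5 * 4) / 2 + 1 = 11, exactly the 11 characters needed for
 * "-2147483648", the widest possible decimal int.
 */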
138 * block device image metadata (in-memory version)
140 struct rbd_image_header {
141 /* These six fields never change for a given rbd image */
148 u64 features; /* Might be changeable someday? */
150 /* The remaining fields need to be updated occasionally */
152 struct ceph_snap_context *snapc;
153 char *snap_names; /* format 1 only */
154 u64 *snap_sizes; /* format 1 only */
158 * An rbd image specification.
160 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
161 * identify an image. Each rbd_dev structure includes a pointer to
162 * an rbd_spec structure that encapsulates this identity.
164 * Each of the id's in an rbd_spec has an associated name. For a
165 * user-mapped image, the names are supplied and the id's associated
166 * with them are looked up. For a layered image, a parent image is
167 * defined by the tuple, and the names are looked up.
169 * An rbd_dev structure contains a parent_spec pointer which is
170 * non-null if the image it represents is a child in a layered
171 * image. This pointer will refer to the rbd_spec structure used
172 * by the parent rbd_dev for its own identity (i.e., the structure
173 * is shared between the parent and child).
175 * Since these structures are populated once, during the discovery
176 * phase of image construction, they are effectively immutable so
177 * we make no effort to synchronize access to them.
179 * Note that code herein does not assume the image name is known (it
180 * could be a null pointer).
184 const char *pool_name;
186 const char *image_id;
187 const char *image_name;
190 const char *snap_name;
/*
 * An instance of the client; multiple devices may share an rbd client.
 */
199 struct ceph_client *client;
201 struct list_head node;
204 struct rbd_img_request;
205 typedef void (*rbd_img_callback_t)(struct rbd_img_request *);
207 #define BAD_WHICH U32_MAX /* Good which or bad which, which? */
209 struct rbd_obj_request;
210 typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);
212 enum obj_request_type {
213 OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
216 enum obj_operation_type {
223 OBJ_REQ_DONE, /* completion flag: not done = 0, done = 1 */
224 OBJ_REQ_IMG_DATA, /* object usage: standalone = 0, image = 1 */
225 OBJ_REQ_KNOWN, /* EXISTS flag valid: no = 0, yes = 1 */
226 OBJ_REQ_EXISTS, /* target exists: no = 0, yes = 1 */
229 struct rbd_obj_request {
230 const char *object_name;
231 u64 offset; /* object start byte */
232 u64 length; /* bytes from offset */
236 * An object request associated with an image will have its
237 * img_data flag set; a standalone object request will not.
239 * A standalone object request will have which == BAD_WHICH
240 * and a null obj_request pointer.
242 * An object request initiated in support of a layered image
243 * object (to check for its existence before a write) will
244 * have which == BAD_WHICH and a non-null obj_request pointer.
246 * Finally, an object request for rbd image data will have
247 * which != BAD_WHICH, and will have a non-null img_request
248 * pointer. The value of which will be in the range
249 * 0..(img_request->obj_request_count-1).
252 struct rbd_obj_request *obj_request; /* STAT op */
254 struct rbd_img_request *img_request;
256 /* links for img_request->obj_requests list */
257 struct list_head links;
	u32			which;		/* posn in image request list */
262 enum obj_request_type type;
264 struct bio *bio_list;
270 struct page **copyup_pages;
271 u32 copyup_page_count;
273 struct ceph_osd_request *osd_req;
275 u64 xferred; /* bytes transferred */
278 rbd_obj_callback_t callback;
279 struct completion completion;
285 IMG_REQ_WRITE, /* I/O direction: read = 0, write = 1 */
286 IMG_REQ_CHILD, /* initiator: block = 0, child image = 1 */
287 IMG_REQ_LAYERED, /* ENOENT handling: normal = 0, layered = 1 */
288 IMG_REQ_DISCARD, /* discard: normal = 0, discard request = 1 */
291 struct rbd_img_request {
292 struct rbd_device *rbd_dev;
293 u64 offset; /* starting image byte offset */
294 u64 length; /* byte count from offset */
297 u64 snap_id; /* for reads */
298 struct ceph_snap_context *snapc; /* for writes */
301 struct request *rq; /* block request */
302 struct rbd_obj_request *obj_request; /* obj req initiator */
304 struct page **copyup_pages;
305 u32 copyup_page_count;
	spinlock_t	completion_lock;	/* protects next_completion */
308 rbd_img_callback_t callback;
	u64		xferred;	/* aggregate bytes transferred */
310 int result; /* first nonzero obj_request result */
312 u32 obj_request_count;
313 struct list_head obj_requests; /* rbd_obj_request structs */
318 #define for_each_obj_request(ireq, oreq) \
319 list_for_each_entry(oreq, &(ireq)->obj_requests, links)
320 #define for_each_obj_request_from(ireq, oreq) \
321 list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
322 #define for_each_obj_request_safe(ireq, oreq, n) \
323 list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
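/*
 * Illustrative usage sketch (not from the original source): walking
 * every object request of an image request with the helpers above:
 *
 *	struct rbd_obj_request *obj_req;
 *
 *	for_each_obj_request(img_req, obj_req)
 *		dout("obj %p which %u\n", obj_req, obj_req->which);
 *
 * The _safe variant iterates in reverse and tolerates deletion of
 * the current entry, which suits teardown paths.
 */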
335 int dev_id; /* blkdev unique id */
337 int major; /* blkdev assigned major */
339 struct gendisk *disk; /* blkdev's gendisk and rq */
341 u32 image_format; /* Either 1 or 2 */
342 struct rbd_client *rbd_client;
344 char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
346 spinlock_t lock; /* queue, flags, open_count */
348 struct rbd_image_header header;
349 unsigned long flags; /* possibly lock protected */
350 struct rbd_spec *spec;
351 struct rbd_options *opts;
355 struct ceph_file_layout layout;
357 struct ceph_osd_event *watch_event;
358 struct rbd_obj_request *watch_request;
360 struct rbd_spec *parent_spec;
363 struct rbd_device *parent;
365 /* Block layer tags. */
366 struct blk_mq_tag_set tag_set;
368 /* protects updating the header */
369 struct rw_semaphore header_rwsem;
371 struct rbd_mapping mapping;
373 struct list_head node;
377 unsigned long open_count; /* protected by lock */
381 * Flag bits for rbd_dev->flags. If atomicity is required,
382 * rbd_dev->lock is used to protect access.
384 * Currently, only the "removing" flag (which is coupled with the
385 * "open_count" field) requires atomic access.
388 RBD_DEV_FLAG_EXISTS, /* mapped snapshot has not been deleted */
389 RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */
392 static DEFINE_MUTEX(client_mutex); /* Serialize client creation */
394 static LIST_HEAD(rbd_dev_list); /* devices */
395 static DEFINE_SPINLOCK(rbd_dev_list_lock);
397 static LIST_HEAD(rbd_client_list); /* clients */
398 static DEFINE_SPINLOCK(rbd_client_list_lock);
400 /* Slab caches for frequently-allocated structures */
402 static struct kmem_cache *rbd_img_request_cache;
403 static struct kmem_cache *rbd_obj_request_cache;
404 static struct kmem_cache *rbd_segment_name_cache;
406 static int rbd_major;
407 static DEFINE_IDA(rbd_dev_id_ida);
409 static struct workqueue_struct *rbd_wq;
/*
 * Default to false for now, as single-major requires a userspace
 * rbd utility of version 0.75 or newer.
 */
415 static bool single_major = false;
416 module_param(single_major, bool, S_IRUGO);
417 MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
419 static int rbd_img_request_submit(struct rbd_img_request *img_request);
421 static ssize_t rbd_add(struct bus_type *bus, const char *buf,
423 static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
425 static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
427 static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
429 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
430 static void rbd_spec_put(struct rbd_spec *spec);
432 static int rbd_dev_id_to_minor(int dev_id)
434 return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
437 static int minor_to_rbd_dev_id(int minor)
439 return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
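/*
 * Worked example (illustrative): with RBD_SINGLE_MAJOR_PART_SHIFT = 4,
 * each device gets 1 << 4 = 16 minors, so dev_id 3 maps to minor 48
 * and minors 48..63 address rbd3 and its partitions.
 */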
442 static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
443 static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
444 static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
445 static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);
447 static struct attribute *rbd_bus_attrs[] = {
449 &bus_attr_remove.attr,
450 &bus_attr_add_single_major.attr,
451 &bus_attr_remove_single_major.attr,
455 static umode_t rbd_bus_is_visible(struct kobject *kobj,
456 struct attribute *attr, int index)
459 (attr == &bus_attr_add_single_major.attr ||
460 attr == &bus_attr_remove_single_major.attr))
466 static const struct attribute_group rbd_bus_group = {
467 .attrs = rbd_bus_attrs,
468 .is_visible = rbd_bus_is_visible,
470 __ATTRIBUTE_GROUPS(rbd_bus);
472 static struct bus_type rbd_bus_type = {
474 .bus_groups = rbd_bus_groups,
477 static void rbd_root_dev_release(struct device *dev)
481 static struct device rbd_root_dev = {
483 .release = rbd_root_dev_release,
486 static __printf(2, 3)
487 void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
489 struct va_format vaf;
497 printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
498 else if (rbd_dev->disk)
499 printk(KERN_WARNING "%s: %s: %pV\n",
500 RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
501 else if (rbd_dev->spec && rbd_dev->spec->image_name)
502 printk(KERN_WARNING "%s: image %s: %pV\n",
503 RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
504 else if (rbd_dev->spec && rbd_dev->spec->image_id)
505 printk(KERN_WARNING "%s: id %s: %pV\n",
506 RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
508 printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
509 RBD_DRV_NAME, rbd_dev, &vaf);
514 #define rbd_assert(expr) \
515 if (unlikely(!(expr))) { \
		printk(KERN_ERR "\nAssertion failure in %s() "	\
					"at line %d:\n\n"	\
				"\trbd_assert(%s);\n\n",	\
				__func__, __LINE__, #expr);	\
522 #else /* !RBD_DEBUG */
523 # define rbd_assert(expr) ((void) 0)
524 #endif /* !RBD_DEBUG */
526 static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
527 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
528 static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
529 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
531 static int rbd_dev_refresh(struct rbd_device *rbd_dev);
532 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
533 static int rbd_dev_header_info(struct rbd_device *rbd_dev);
534 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
535 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
537 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
538 u8 *order, u64 *snap_size);
539 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
542 static int rbd_open(struct block_device *bdev, fmode_t mode)
544 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
545 bool removing = false;
547 if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
550 spin_lock_irq(&rbd_dev->lock);
551 if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
554 rbd_dev->open_count++;
555 spin_unlock_irq(&rbd_dev->lock);
559 (void) get_device(&rbd_dev->dev);
564 static void rbd_release(struct gendisk *disk, fmode_t mode)
566 struct rbd_device *rbd_dev = disk->private_data;
567 unsigned long open_count_before;
569 spin_lock_irq(&rbd_dev->lock);
570 open_count_before = rbd_dev->open_count--;
571 spin_unlock_irq(&rbd_dev->lock);
572 rbd_assert(open_count_before > 0);
574 put_device(&rbd_dev->dev);
577 static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
582 bool ro_changed = false;
584 /* get_user() may sleep, so call it before taking rbd_dev->lock */
585 if (get_user(val, (int __user *)(arg)))
588 ro = val ? true : false;
	/* Snapshots don't allow writes */
590 if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
593 spin_lock_irq(&rbd_dev->lock);
	/* prevent others from opening this device */
595 if (rbd_dev->open_count > 1) {
600 if (rbd_dev->mapping.read_only != ro) {
601 rbd_dev->mapping.read_only = ro;
606 spin_unlock_irq(&rbd_dev->lock);
607 /* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
608 if (ret == 0 && ro_changed)
609 set_disk_ro(rbd_dev->disk, ro ? 1 : 0);
614 static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
615 unsigned int cmd, unsigned long arg)
617 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
622 ret = rbd_ioctl_set_ro(rbd_dev, arg);
632 static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
633 unsigned int cmd, unsigned long arg)
635 return rbd_ioctl(bdev, mode, cmd, arg);
637 #endif /* CONFIG_COMPAT */
639 static const struct block_device_operations rbd_bd_ops = {
640 .owner = THIS_MODULE,
642 .release = rbd_release,
645 .compat_ioctl = rbd_compat_ioctl,
650 * Initialize an rbd client instance. Success or not, this function
651 * consumes ceph_opts. Caller holds client_mutex.
653 static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
655 struct rbd_client *rbdc;
658 dout("%s:\n", __func__);
659 rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
663 kref_init(&rbdc->kref);
664 INIT_LIST_HEAD(&rbdc->node);
666 rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
667 if (IS_ERR(rbdc->client))
669 ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
671 ret = ceph_open_session(rbdc->client);
675 spin_lock(&rbd_client_list_lock);
676 list_add_tail(&rbdc->node, &rbd_client_list);
677 spin_unlock(&rbd_client_list_lock);
679 dout("%s: rbdc %p\n", __func__, rbdc);
683 ceph_destroy_client(rbdc->client);
688 ceph_destroy_options(ceph_opts);
689 dout("%s: error %d\n", __func__, ret);
694 static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
696 kref_get(&rbdc->kref);
702 * Find a ceph client with specific addr and configuration. If
703 * found, bump its reference count.
705 static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
707 struct rbd_client *client_node;
710 if (ceph_opts->flags & CEPH_OPT_NOSHARE)
713 spin_lock(&rbd_client_list_lock);
714 list_for_each_entry(client_node, &rbd_client_list, node) {
715 if (!ceph_compare_options(ceph_opts, client_node->client)) {
716 __rbd_get_client(client_node);
722 spin_unlock(&rbd_client_list_lock);
724 return found ? client_node : NULL;
728 * (Per device) rbd map options
735 /* string args above */
741 static match_table_t rbd_opts_tokens = {
742 {Opt_queue_depth, "queue_depth=%d"},
744 /* string args above */
745 {Opt_read_only, "read_only"},
746 {Opt_read_only, "ro"}, /* Alternate spelling */
747 {Opt_read_write, "read_write"},
748 {Opt_read_write, "rw"}, /* Alternate spelling */
757 #define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ
758 #define RBD_READ_ONLY_DEFAULT false
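/*
 * Illustrative examples (assumed invocations, not from the original
 * source): these options arrive as a comma-separated string when an
 * image is mapped, e.g. "queue_depth=128" to deepen the queue, or
 * "ro" to force a read-only mapping.
 */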
760 static int parse_rbd_opts_token(char *c, void *private)
762 struct rbd_options *rbd_opts = private;
763 substring_t argstr[MAX_OPT_ARGS];
764 int token, intval, ret;
766 token = match_token(c, rbd_opts_tokens, argstr);
767 if (token < Opt_last_int) {
768 ret = match_int(&argstr[0], &intval);
770 pr_err("bad mount option arg (not int) at '%s'\n", c);
773 dout("got int token %d val %d\n", token, intval);
774 } else if (token > Opt_last_int && token < Opt_last_string) {
775 dout("got string token %d val %s\n", token, argstr[0].from);
777 dout("got token %d\n", token);
781 case Opt_queue_depth:
783 pr_err("queue_depth out of range\n");
786 rbd_opts->queue_depth = intval;
789 rbd_opts->read_only = true;
792 rbd_opts->read_only = false;
795 /* libceph prints "bad option" msg */
static char *obj_op_name(enum obj_operation_type op_type)
/*
 * Get a ceph client with specific addr and configuration; create a
 * new one if none exists. Either way, ceph_opts is consumed by this
 * function.
 */
821 static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
823 struct rbd_client *rbdc;
825 mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
826 rbdc = rbd_client_find(ceph_opts);
827 if (rbdc) /* using an existing client */
828 ceph_destroy_options(ceph_opts);
830 rbdc = rbd_client_create(ceph_opts);
831 mutex_unlock(&client_mutex);
/*
 * Destroy ceph client. The rbd_client_list_lock is taken here to
 * remove the client from the client list, so callers must not hold it.
 */
841 static void rbd_client_release(struct kref *kref)
843 struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
845 dout("%s: rbdc %p\n", __func__, rbdc);
846 spin_lock(&rbd_client_list_lock);
847 list_del(&rbdc->node);
848 spin_unlock(&rbd_client_list_lock);
850 ceph_destroy_client(rbdc->client);
/*
 * Drop a reference to a ceph client node. If it's the last reference,
 * release it.
 */
858 static void rbd_put_client(struct rbd_client *rbdc)
861 kref_put(&rbdc->kref, rbd_client_release);
864 static bool rbd_image_format_valid(u32 image_format)
866 return image_format == 1 || image_format == 2;
869 static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
874 /* The header has to start with the magic rbd header text */
875 if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
878 /* The bio layer requires at least sector-sized I/O */
880 if (ondisk->options.order < SECTOR_SHIFT)
883 /* If we use u64 in a few spots we may be able to loosen this */
885 if (ondisk->options.order > 8 * sizeof (int) - 1)
889 * The size of a snapshot header has to fit in a size_t, and
890 * that limits the number of snapshots.
892 snap_count = le32_to_cpu(ondisk->snap_count);
893 size = SIZE_MAX - sizeof (struct ceph_snap_context);
894 if (snap_count > size / sizeof (__le64))
	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
901 size -= snap_count * sizeof (__le64);
902 if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
909 * Fill an rbd image header with information from the given format 1
912 static int rbd_header_from_disk(struct rbd_device *rbd_dev,
913 struct rbd_image_header_ondisk *ondisk)
915 struct rbd_image_header *header = &rbd_dev->header;
916 bool first_time = header->object_prefix == NULL;
917 struct ceph_snap_context *snapc;
918 char *object_prefix = NULL;
919 char *snap_names = NULL;
920 u64 *snap_sizes = NULL;
926 /* Allocate this now to avoid having to handle failure below */
931 len = strnlen(ondisk->object_prefix,
932 sizeof (ondisk->object_prefix));
933 object_prefix = kmalloc(len + 1, GFP_KERNEL);
936 memcpy(object_prefix, ondisk->object_prefix, len);
937 object_prefix[len] = '\0';
940 /* Allocate the snapshot context and fill it in */
942 snap_count = le32_to_cpu(ondisk->snap_count);
943 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
946 snapc->seq = le64_to_cpu(ondisk->snap_seq);
948 struct rbd_image_snap_ondisk *snaps;
949 u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
951 /* We'll keep a copy of the snapshot names... */
953 if (snap_names_len > (u64)SIZE_MAX)
955 snap_names = kmalloc(snap_names_len, GFP_KERNEL);
959 /* ...as well as the array of their sizes. */
961 size = snap_count * sizeof (*header->snap_sizes);
962 snap_sizes = kmalloc(size, GFP_KERNEL);
		/*
		 * Copy the names, and fill in each snapshot's id and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has snap_names_len
		 * bytes beyond the end of the snapshot id array, so
		 * this memcpy() is safe.
		 */
975 memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
976 snaps = ondisk->snaps;
977 for (i = 0; i < snap_count; i++) {
978 snapc->snaps[i] = le64_to_cpu(snaps[i].id);
979 snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
983 /* We won't fail any more, fill in the header */
986 header->object_prefix = object_prefix;
987 header->obj_order = ondisk->options.order;
988 header->crypt_type = ondisk->options.crypt_type;
989 header->comp_type = ondisk->options.comp_type;
990 /* The rest aren't used for format 1 images */
991 header->stripe_unit = 0;
992 header->stripe_count = 0;
993 header->features = 0;
995 ceph_put_snap_context(header->snapc);
996 kfree(header->snap_names);
997 kfree(header->snap_sizes);
1000 /* The remaining fields always get updated (when we refresh) */
1002 header->image_size = le64_to_cpu(ondisk->image_size);
1003 header->snapc = snapc;
1004 header->snap_names = snap_names;
1005 header->snap_sizes = snap_sizes;
1013 ceph_put_snap_context(snapc);
1014 kfree(object_prefix);
1019 static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
1021 const char *snap_name;
1023 rbd_assert(which < rbd_dev->header.snapc->num_snaps);
1025 /* Skip over names until we find the one we are looking for */
1027 snap_name = rbd_dev->header.snap_names;
1029 snap_name += strlen(snap_name) + 1;
1031 return kstrdup(snap_name, GFP_KERNEL);
1035 * Snapshot id comparison function for use with qsort()/bsearch().
1036 * Note that result is for snapshots in *descending* order.
1038 static int snapid_compare_reverse(const void *s1, const void *s2)
1040 u64 snap_id1 = *(u64 *)s1;
1041 u64 snap_id2 = *(u64 *)s2;
1043 if (snap_id1 < snap_id2)
1045 return snap_id1 == snap_id2 ? 0 : -1;
/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
1058 static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
1060 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
1063 found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
1064 sizeof (snap_id), snapid_compare_reverse);
1066 return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
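/*
 * Worked example (illustrative): for a snaps[] array {12, 10, 7, 3}
 * (descending, as the osd keeps it), searching for id 7 returns
 * index 2, while searching for id 8 finds nothing and yields
 * BAD_SNAP_INDEX.
 */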
1069 static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
1073 const char *snap_name;
1075 which = rbd_dev_snap_index(rbd_dev, snap_id);
1076 if (which == BAD_SNAP_INDEX)
1077 return ERR_PTR(-ENOENT);
1079 snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
1080 return snap_name ? snap_name : ERR_PTR(-ENOMEM);
1083 static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
1085 if (snap_id == CEPH_NOSNAP)
1086 return RBD_SNAP_HEAD_NAME;
1088 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1089 if (rbd_dev->image_format == 1)
1090 return rbd_dev_v1_snap_name(rbd_dev, snap_id);
1092 return rbd_dev_v2_snap_name(rbd_dev, snap_id);
1095 static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
1098 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1099 if (snap_id == CEPH_NOSNAP) {
1100 *snap_size = rbd_dev->header.image_size;
1101 } else if (rbd_dev->image_format == 1) {
1104 which = rbd_dev_snap_index(rbd_dev, snap_id);
1105 if (which == BAD_SNAP_INDEX)
1108 *snap_size = rbd_dev->header.snap_sizes[which];
1113 ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
1122 static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
1125 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1126 if (snap_id == CEPH_NOSNAP) {
1127 *snap_features = rbd_dev->header.features;
1128 } else if (rbd_dev->image_format == 1) {
1129 *snap_features = 0; /* No features for format 1 */
1134 ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
1138 *snap_features = features;
1143 static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
1145 u64 snap_id = rbd_dev->spec->snap_id;
1150 ret = rbd_snap_size(rbd_dev, snap_id, &size);
1153 ret = rbd_snap_features(rbd_dev, snap_id, &features);
1157 rbd_dev->mapping.size = size;
1158 rbd_dev->mapping.features = features;
1163 static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
1165 rbd_dev->mapping.size = 0;
1166 rbd_dev->mapping.features = 0;
1169 static void rbd_segment_name_free(const char *name)
1171 /* The explicit cast here is needed to drop the const qualifier */
1173 kmem_cache_free(rbd_segment_name_cache, (void *)name);
1176 static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
1183 name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
1186 segment = offset >> rbd_dev->header.obj_order;
1187 name_format = "%s.%012llx";
1188 if (rbd_dev->image_format == 2)
1189 name_format = "%s.%016llx";
1190 ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
1191 rbd_dev->header.object_prefix, segment);
1192 if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
1193 pr_err("error formatting segment name for #%llu (%d)\n",
1195 rbd_segment_name_free(name);
1202 static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
1204 u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
1206 return offset & (segment_size - 1);
1209 static u64 rbd_segment_length(struct rbd_device *rbd_dev,
1210 u64 offset, u64 length)
1212 u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
1214 offset &= segment_size - 1;
1216 rbd_assert(length <= U64_MAX - offset);
1217 if (offset + length > segment_size)
1218 length = segment_size - offset;
1224 * returns the size of an object in the image
1226 static u64 rbd_obj_bytes(struct rbd_image_header *header)
1228 return 1 << header->obj_order;
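/*
 * Worked example (illustrative): with obj_order 22 each object holds
 * 1 << 22 = 4 MiB. An image offset of 5 MiB falls in segment 1 at
 * offset 1 MiB within the object, and a 4 MiB request starting there
 * is clipped by rbd_segment_length() to the 3 MiB remaining in that
 * segment.
 */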
1235 static void bio_chain_put(struct bio *chain)
1241 chain = chain->bi_next;
/*
 * Zeros a bio chain, starting at a specific offset.
 */
1249 static void zero_bio_chain(struct bio *chain, int start_ofs)
1252 struct bvec_iter iter;
1253 unsigned long flags;
1258 bio_for_each_segment(bv, chain, iter) {
1259 if (pos + bv.bv_len > start_ofs) {
1260 int remainder = max(start_ofs - pos, 0);
1261 buf = bvec_kmap_irq(&bv, &flags);
1262 memset(buf + remainder, 0,
1263 bv.bv_len - remainder);
1264 flush_dcache_page(bv.bv_page);
1265 bvec_kunmap_irq(buf, &flags);
1270 chain = chain->bi_next;
1275 * similar to zero_bio_chain(), zeros data defined by a page array,
1276 * starting at the given byte offset from the start of the array and
1277 * continuing up to the given end offset. The pages array is
1278 * assumed to be big enough to hold all bytes up to the end.
1280 static void zero_pages(struct page **pages, u64 offset, u64 end)
1282 struct page **page = &pages[offset >> PAGE_SHIFT];
1284 rbd_assert(end > offset);
1285 rbd_assert(end - offset <= (u64)SIZE_MAX);
1286 while (offset < end) {
1289 unsigned long flags;
1292 page_offset = offset & ~PAGE_MASK;
1293 length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
1294 local_irq_save(flags);
1295 kaddr = kmap_atomic(*page);
1296 memset(kaddr + page_offset, 0, length);
1297 flush_dcache_page(*page);
1298 kunmap_atomic(kaddr);
1299 local_irq_restore(flags);
1307 * Clone a portion of a bio, starting at the given byte offset
1308 * and continuing for the number of bytes indicated.
1310 static struct bio *bio_clone_range(struct bio *bio_src,
1311 unsigned int offset,
1317 bio = bio_clone(bio_src, gfpmask);
1319 return NULL; /* ENOMEM */
1321 bio_advance(bio, offset);
1322 bio->bi_iter.bi_size = len;
/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated. The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out. On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
1341 static struct bio *bio_chain_clone_range(struct bio **bio_src,
1342 unsigned int *offset,
1346 struct bio *bi = *bio_src;
1347 unsigned int off = *offset;
1348 struct bio *chain = NULL;
1351 /* Build up a chain of clone bios up to the limit */
1353 if (!bi || off >= bi->bi_iter.bi_size || !len)
1354 return NULL; /* Nothing to clone */
1358 unsigned int bi_size;
1362 rbd_warn(NULL, "bio_chain exhausted with %u left", len);
1363 goto out_err; /* EINVAL; ran out of bio's */
1365 bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
1366 bio = bio_clone_range(bi, off, bi_size, gfpmask);
1368 goto out_err; /* ENOMEM */
1371 end = &bio->bi_next;
1374 if (off == bi->bi_iter.bi_size) {
1385 bio_chain_put(chain);
/*
 * The default/initial value for all object request flags is 0. For
 * each flag, once its value is set to 1 it is never reset to 0 again.
 */
1395 static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
1397 if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
1398 struct rbd_device *rbd_dev;
1400 rbd_dev = obj_request->img_request->rbd_dev;
1401 rbd_warn(rbd_dev, "obj_request %p already marked img_data",
1406 static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
1409 return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
1412 static void obj_request_done_set(struct rbd_obj_request *obj_request)
1414 if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
1415 struct rbd_device *rbd_dev = NULL;
1417 if (obj_request_img_data_test(obj_request))
1418 rbd_dev = obj_request->img_request->rbd_dev;
1419 rbd_warn(rbd_dev, "obj_request %p already marked done",
1424 static bool obj_request_done_test(struct rbd_obj_request *obj_request)
1427 return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag. The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again. It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist"). In that case the late-arriving "doesn't exist"
 * response is ignored.
 */
1440 static void obj_request_existence_set(struct rbd_obj_request *obj_request,
1444 set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
1445 set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
1449 static bool obj_request_known_test(struct rbd_obj_request *obj_request)
1452 return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
1455 static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
1458 return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
1461 static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
1463 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
1465 return obj_request->img_offset <
1466 round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
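/*
 * Worked example (illustrative): with 4 MiB objects and a parent
 * overlap of 6 MiB, round_up() extends the effective overlap to
 * 8 MiB, so an object request at img_offset 7 MiB is still treated
 * as overlapping the parent; objects straddling the overlap boundary
 * are considered whole.
 */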
1469 static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
1471 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1472 atomic_read(&obj_request->kref.refcount));
1473 kref_get(&obj_request->kref);
1476 static void rbd_obj_request_destroy(struct kref *kref);
1477 static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1479 rbd_assert(obj_request != NULL);
1480 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1481 atomic_read(&obj_request->kref.refcount));
1482 kref_put(&obj_request->kref, rbd_obj_request_destroy);
1485 static void rbd_img_request_get(struct rbd_img_request *img_request)
1487 dout("%s: img %p (was %d)\n", __func__, img_request,
1488 atomic_read(&img_request->kref.refcount));
1489 kref_get(&img_request->kref);
1492 static bool img_request_child_test(struct rbd_img_request *img_request);
1493 static void rbd_parent_request_destroy(struct kref *kref);
1494 static void rbd_img_request_destroy(struct kref *kref);
1495 static void rbd_img_request_put(struct rbd_img_request *img_request)
1497 rbd_assert(img_request != NULL);
1498 dout("%s: img %p (was %d)\n", __func__, img_request,
1499 atomic_read(&img_request->kref.refcount));
1500 if (img_request_child_test(img_request))
1501 kref_put(&img_request->kref, rbd_parent_request_destroy);
1503 kref_put(&img_request->kref, rbd_img_request_destroy);
1506 static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
1507 struct rbd_obj_request *obj_request)
1509 rbd_assert(obj_request->img_request == NULL);
1511 /* Image request now owns object's original reference */
1512 obj_request->img_request = img_request;
1513 obj_request->which = img_request->obj_request_count;
1514 rbd_assert(!obj_request_img_data_test(obj_request));
1515 obj_request_img_data_set(obj_request);
1516 rbd_assert(obj_request->which != BAD_WHICH);
1517 img_request->obj_request_count++;
1518 list_add_tail(&obj_request->links, &img_request->obj_requests);
1519 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1520 obj_request->which);
1523 static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
1524 struct rbd_obj_request *obj_request)
1526 rbd_assert(obj_request->which != BAD_WHICH);
1528 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1529 obj_request->which);
1530 list_del(&obj_request->links);
1531 rbd_assert(img_request->obj_request_count > 0);
1532 img_request->obj_request_count--;
1533 rbd_assert(obj_request->which == img_request->obj_request_count);
1534 obj_request->which = BAD_WHICH;
1535 rbd_assert(obj_request_img_data_test(obj_request));
1536 rbd_assert(obj_request->img_request == img_request);
1537 obj_request->img_request = NULL;
1538 obj_request->callback = NULL;
1539 rbd_obj_request_put(obj_request);
1542 static bool obj_request_type_valid(enum obj_request_type type)
1545 case OBJ_REQUEST_NODATA:
1546 case OBJ_REQUEST_BIO:
1547 case OBJ_REQUEST_PAGES:
1554 static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
1555 struct rbd_obj_request *obj_request)
1557 dout("%s %p\n", __func__, obj_request);
1558 return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
1561 static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
1563 dout("%s %p\n", __func__, obj_request);
1564 ceph_osdc_cancel_request(obj_request->osd_req);
1568 * Wait for an object request to complete. If interrupted, cancel the
1569 * underlying osd request.
1571 * @timeout: in jiffies, 0 means "wait forever"
1573 static int __rbd_obj_request_wait(struct rbd_obj_request *obj_request,
1574 unsigned long timeout)
1578 dout("%s %p\n", __func__, obj_request);
1579 ret = wait_for_completion_interruptible_timeout(
1580 &obj_request->completion,
1581 ceph_timeout_jiffies(timeout));
1585 rbd_obj_request_end(obj_request);
1590 dout("%s %p ret %d\n", __func__, obj_request, (int)ret);
1594 static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
1596 return __rbd_obj_request_wait(obj_request, 0);
1599 static int rbd_obj_request_wait_timeout(struct rbd_obj_request *obj_request,
1600 unsigned long timeout)
1602 return __rbd_obj_request_wait(obj_request, timeout);
1605 static void rbd_img_request_complete(struct rbd_img_request *img_request)
1608 dout("%s: img %p\n", __func__, img_request);
	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request. We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; it is not clear offhand which way is better.
	 */
1616 if (!img_request->result) {
1617 struct rbd_obj_request *obj_request;
1620 for_each_obj_request(img_request, obj_request)
1621 xferred += obj_request->xferred;
1622 img_request->xferred = xferred;
1625 if (img_request->callback)
1626 img_request->callback(img_request);
1628 rbd_img_request_put(img_request);
/*
 * The default/initial value for all image request flags is 0. Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
1636 static void img_request_write_set(struct rbd_img_request *img_request)
1638 set_bit(IMG_REQ_WRITE, &img_request->flags);
1642 static bool img_request_write_test(struct rbd_img_request *img_request)
1645 return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
/*
 * Set the discard flag when the img_request is a discard request.
 */
1651 static void img_request_discard_set(struct rbd_img_request *img_request)
1653 set_bit(IMG_REQ_DISCARD, &img_request->flags);
1657 static bool img_request_discard_test(struct rbd_img_request *img_request)
1660 return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
1663 static void img_request_child_set(struct rbd_img_request *img_request)
1665 set_bit(IMG_REQ_CHILD, &img_request->flags);
1669 static void img_request_child_clear(struct rbd_img_request *img_request)
1671 clear_bit(IMG_REQ_CHILD, &img_request->flags);
1675 static bool img_request_child_test(struct rbd_img_request *img_request)
1678 return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
1681 static void img_request_layered_set(struct rbd_img_request *img_request)
1683 set_bit(IMG_REQ_LAYERED, &img_request->flags);
1687 static void img_request_layered_clear(struct rbd_img_request *img_request)
1689 clear_bit(IMG_REQ_LAYERED, &img_request->flags);
1693 static bool img_request_layered_test(struct rbd_img_request *img_request)
1696 return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
1699 static enum obj_operation_type
1700 rbd_img_request_op_type(struct rbd_img_request *img_request)
1702 if (img_request_write_test(img_request))
1703 return OBJ_OP_WRITE;
1704 else if (img_request_discard_test(img_request))
1705 return OBJ_OP_DISCARD;
1711 rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
1713 u64 xferred = obj_request->xferred;
1714 u64 length = obj_request->length;
1716 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1717 obj_request, obj_request->img_request, obj_request->result,
1720 * ENOENT means a hole in the image. We zero-fill the entire
1721 * length of the request. A short read also implies zero-fill
1722 * to the end of the request. An error requires the whole
1723 * length of the request to be reported finished with an error
1724 * to the block layer. In each case we update the xferred
1725 * count to indicate the whole request was satisfied.
1727 rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
1728 if (obj_request->result == -ENOENT) {
1729 if (obj_request->type == OBJ_REQUEST_BIO)
1730 zero_bio_chain(obj_request->bio_list, 0);
1732 zero_pages(obj_request->pages, 0, length);
1733 obj_request->result = 0;
1734 } else if (xferred < length && !obj_request->result) {
1735 if (obj_request->type == OBJ_REQUEST_BIO)
1736 zero_bio_chain(obj_request->bio_list, xferred);
1738 zero_pages(obj_request->pages, xferred, length);
1740 obj_request->xferred = length;
1741 obj_request_done_set(obj_request);
1744 static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
1746 dout("%s: obj %p cb %p\n", __func__, obj_request,
1747 obj_request->callback);
1748 if (obj_request->callback)
1749 obj_request->callback(obj_request);
1751 complete_all(&obj_request->completion);
1754 static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
1756 dout("%s: obj %p\n", __func__, obj_request);
1757 obj_request_done_set(obj_request);
1760 static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
1762 struct rbd_img_request *img_request = NULL;
1763 struct rbd_device *rbd_dev = NULL;
1764 bool layered = false;
1766 if (obj_request_img_data_test(obj_request)) {
1767 img_request = obj_request->img_request;
1768 layered = img_request && img_request_layered_test(img_request);
1769 rbd_dev = img_request->rbd_dev;
1772 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1773 obj_request, img_request, obj_request->result,
1774 obj_request->xferred, obj_request->length);
1775 if (layered && obj_request->result == -ENOENT &&
1776 obj_request->img_offset < rbd_dev->parent_overlap)
1777 rbd_img_parent_read(obj_request);
1778 else if (img_request)
1779 rbd_img_obj_request_read_callback(obj_request);
1781 obj_request_done_set(obj_request);
1784 static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
1786 dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1787 obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write. Set
	 * the transfer count to our originally-requested length.
	 */
1792 obj_request->xferred = obj_request->length;
1793 obj_request_done_set(obj_request);
1796 static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
1798 dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1799 obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short discard. Set
	 * the transfer count to our originally-requested length.
	 */
1804 obj_request->xferred = obj_request->length;
1805 /* discarding a non-existent object is not a problem */
1806 if (obj_request->result == -ENOENT)
1807 obj_request->result = 0;
1808 obj_request_done_set(obj_request);
1812 * For a simple stat call there's nothing to do. We'll do more if
1813 * this is part of a write sequence for a layered image.
1815 static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
1817 dout("%s: obj %p\n", __func__, obj_request);
1818 obj_request_done_set(obj_request);
1821 static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
1823 dout("%s: obj %p\n", __func__, obj_request);
1825 if (obj_request_img_data_test(obj_request))
1826 rbd_osd_copyup_callback(obj_request);
1828 obj_request_done_set(obj_request);
1831 static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1832 struct ceph_msg *msg)
1834 struct rbd_obj_request *obj_request = osd_req->r_priv;
1837 dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
1838 rbd_assert(osd_req == obj_request->osd_req);
1839 if (obj_request_img_data_test(obj_request)) {
1840 rbd_assert(obj_request->img_request);
1841 rbd_assert(obj_request->which != BAD_WHICH);
1843 rbd_assert(obj_request->which == BAD_WHICH);
1846 if (osd_req->r_result < 0)
1847 obj_request->result = osd_req->r_result;
	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to the block layer, which just supports a 32-bit
	 * length field.
	 */
1854 obj_request->xferred = osd_req->r_ops[0].outdata_len;
1855 rbd_assert(obj_request->xferred < (u64)UINT_MAX);
1857 opcode = osd_req->r_ops[0].op;
1859 case CEPH_OSD_OP_READ:
1860 rbd_osd_read_callback(obj_request);
1862 case CEPH_OSD_OP_SETALLOCHINT:
1863 rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE ||
1864 osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL);
1866 case CEPH_OSD_OP_WRITE:
1867 case CEPH_OSD_OP_WRITEFULL:
1868 rbd_osd_write_callback(obj_request);
1870 case CEPH_OSD_OP_STAT:
1871 rbd_osd_stat_callback(obj_request);
1873 case CEPH_OSD_OP_DELETE:
1874 case CEPH_OSD_OP_TRUNCATE:
1875 case CEPH_OSD_OP_ZERO:
1876 rbd_osd_discard_callback(obj_request);
1878 case CEPH_OSD_OP_CALL:
1879 rbd_osd_call_callback(obj_request);
1881 case CEPH_OSD_OP_NOTIFY_ACK:
1882 case CEPH_OSD_OP_WATCH:
1883 rbd_osd_trivial_callback(obj_request);
1886 rbd_warn(NULL, "%s: unsupported op %hu",
1887 obj_request->object_name, (unsigned short) opcode);
1891 if (obj_request_done_test(obj_request))
1892 rbd_obj_request_complete(obj_request);
1895 static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
1897 struct rbd_img_request *img_request = obj_request->img_request;
1898 struct ceph_osd_request *osd_req = obj_request->osd_req;
1901 rbd_assert(osd_req != NULL);
1903 snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
1904 ceph_osdc_build_request(osd_req, obj_request->offset,
1905 NULL, snap_id, NULL);
1908 static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
1910 struct rbd_img_request *img_request = obj_request->img_request;
1911 struct ceph_osd_request *osd_req = obj_request->osd_req;
1912 struct ceph_snap_context *snapc;
1913 struct timespec mtime = CURRENT_TIME;
1915 rbd_assert(osd_req != NULL);
1917 snapc = img_request ? img_request->snapc : NULL;
1918 ceph_osdc_build_request(osd_req, obj_request->offset,
1919 snapc, CEPH_NOSNAP, &mtime);
1923 * Create an osd request. A read request has one osd op (read).
1924 * A write request has either one (watch) or two (hint+write) osd ops.
1925 * (All rbd data writes are prefixed with an allocation hint op, but
1926 * technically osd watch is a write request, hence this distinction.)
1928 static struct ceph_osd_request *rbd_osd_req_create(
1929 struct rbd_device *rbd_dev,
1930 enum obj_operation_type op_type,
1931 unsigned int num_ops,
1932 struct rbd_obj_request *obj_request)
1934 struct ceph_snap_context *snapc = NULL;
1935 struct ceph_osd_client *osdc;
1936 struct ceph_osd_request *osd_req;
1938 if (obj_request_img_data_test(obj_request) &&
1939 (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
1940 struct rbd_img_request *img_request = obj_request->img_request;
1941 if (op_type == OBJ_OP_WRITE) {
1942 rbd_assert(img_request_write_test(img_request));
1944 rbd_assert(img_request_discard_test(img_request));
1946 snapc = img_request->snapc;
1949 rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));
1951 /* Allocate and initialize the request, for the num_ops ops */
1953 osdc = &rbd_dev->rbd_client->client->osdc;
1954 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
1957 return NULL; /* ENOMEM */
1959 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
1960 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
1962 osd_req->r_flags = CEPH_OSD_FLAG_READ;
1964 osd_req->r_callback = rbd_osd_req_callback;
1965 osd_req->r_priv = obj_request;
1967 osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
1968 ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);
/*
 * Create a copyup osd request based on the information in the object
 * request supplied. A copyup request has two or three osd ops: a
 * copyup method call, potentially a hint op, and a write, truncate,
 * or zero op.
 */
1979 static struct ceph_osd_request *
1980 rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
1982 struct rbd_img_request *img_request;
1983 struct ceph_snap_context *snapc;
1984 struct rbd_device *rbd_dev;
1985 struct ceph_osd_client *osdc;
1986 struct ceph_osd_request *osd_req;
1987 int num_osd_ops = 3;
1989 rbd_assert(obj_request_img_data_test(obj_request));
1990 img_request = obj_request->img_request;
1991 rbd_assert(img_request);
1992 rbd_assert(img_request_write_test(img_request) ||
1993 img_request_discard_test(img_request));
1995 if (img_request_discard_test(img_request))
1998 /* Allocate and initialize the request, for all the ops */
2000 snapc = img_request->snapc;
2001 rbd_dev = img_request->rbd_dev;
2002 osdc = &rbd_dev->rbd_client->client->osdc;
2003 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
2006 return NULL; /* ENOMEM */
2008 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
2009 osd_req->r_callback = rbd_osd_req_callback;
2010 osd_req->r_priv = obj_request;
2012 osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
2013 ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);
2019 static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
2021 ceph_osdc_put_request(osd_req);
2024 /* object_name is assumed to be a non-null pointer and NUL-terminated */
2026 static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
2027 u64 offset, u64 length,
2028 enum obj_request_type type)
2030 struct rbd_obj_request *obj_request;
2034 rbd_assert(obj_request_type_valid(type));
2036 size = strlen(object_name) + 1;
2037 name = kmalloc(size, GFP_NOIO);
2041 obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
2047 obj_request->object_name = memcpy(name, object_name, size);
2048 obj_request->offset = offset;
2049 obj_request->length = length;
2050 obj_request->flags = 0;
2051 obj_request->which = BAD_WHICH;
2052 obj_request->type = type;
2053 INIT_LIST_HEAD(&obj_request->links);
2054 init_completion(&obj_request->completion);
2055 kref_init(&obj_request->kref);
2057 dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
2058 offset, length, (int)type, obj_request);
2063 static void rbd_obj_request_destroy(struct kref *kref)
2065 struct rbd_obj_request *obj_request;
2067 obj_request = container_of(kref, struct rbd_obj_request, kref);
2069 dout("%s: obj %p\n", __func__, obj_request);
2071 rbd_assert(obj_request->img_request == NULL);
2072 rbd_assert(obj_request->which == BAD_WHICH);
2074 if (obj_request->osd_req)
2075 rbd_osd_req_destroy(obj_request->osd_req);
2077 rbd_assert(obj_request_type_valid(obj_request->type));
2078 switch (obj_request->type) {
2079 case OBJ_REQUEST_NODATA:
2080 break; /* Nothing to do */
2081 case OBJ_REQUEST_BIO:
2082 if (obj_request->bio_list)
2083 bio_chain_put(obj_request->bio_list);
2085 case OBJ_REQUEST_PAGES:
2086 if (obj_request->pages)
2087 ceph_release_page_vector(obj_request->pages,
2088 obj_request->page_count);
2092 kfree(obj_request->object_name);
2093 obj_request->object_name = NULL;
2094 kmem_cache_free(rbd_obj_request_cache, obj_request);
2097 /* It's OK to call this for a device with no parent */
2099 static void rbd_spec_put(struct rbd_spec *spec);
2100 static void rbd_dev_unparent(struct rbd_device *rbd_dev)
2102 rbd_dev_remove_parent(rbd_dev);
2103 rbd_spec_put(rbd_dev->parent_spec);
2104 rbd_dev->parent_spec = NULL;
2105 rbd_dev->parent_overlap = 0;
/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down: after there are no
 * more in-flight requests to the parent image. When the last
 * reference is dropped, cleaning them up is safe.
 */
2114 static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
2118 if (!rbd_dev->parent_spec)
2121 counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
2125 /* Last reference; clean up parent data structures */
2128 rbd_dev_unparent(rbd_dev);
2130 rbd_warn(rbd_dev, "parent reference underflow");
/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or false
 * otherwise.
 */
2141 static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
2145 if (!rbd_dev->parent_spec)
2148 down_read(&rbd_dev->header_rwsem);
2149 if (rbd_dev->parent_overlap)
2150 counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
2151 up_read(&rbd_dev->header_rwsem);
2154 rbd_warn(rbd_dev, "parent reference overflow");
2160 * Caller is responsible for filling in the list of object requests
2161 * that comprises the image request, and the Linux request pointer
2162 * (if there is one).
2164 static struct rbd_img_request *rbd_img_request_create(
2165 struct rbd_device *rbd_dev,
2166 u64 offset, u64 length,
2167 enum obj_operation_type op_type,
2168 struct ceph_snap_context *snapc)
2170 struct rbd_img_request *img_request;
2172 img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
2176 img_request->rq = NULL;
2177 img_request->rbd_dev = rbd_dev;
2178 img_request->offset = offset;
2179 img_request->length = length;
2180 img_request->flags = 0;
2181 if (op_type == OBJ_OP_DISCARD) {
2182 img_request_discard_set(img_request);
2183 img_request->snapc = snapc;
2184 } else if (op_type == OBJ_OP_WRITE) {
2185 img_request_write_set(img_request);
2186 img_request->snapc = snapc;
2188 img_request->snap_id = rbd_dev->spec->snap_id;
2190 if (rbd_dev_parent_get(rbd_dev))
2191 img_request_layered_set(img_request);
2192 spin_lock_init(&img_request->completion_lock);
2193 img_request->next_completion = 0;
2194 img_request->callback = NULL;
2195 img_request->result = 0;
2196 img_request->obj_request_count = 0;
2197 INIT_LIST_HEAD(&img_request->obj_requests);
2198 kref_init(&img_request->kref);
2200 dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
2201 obj_op_name(op_type), offset, length, img_request);
2206 static void rbd_img_request_destroy(struct kref *kref)
2208 struct rbd_img_request *img_request;
2209 struct rbd_obj_request *obj_request;
2210 struct rbd_obj_request *next_obj_request;
2212 img_request = container_of(kref, struct rbd_img_request, kref);
2214 dout("%s: img %p\n", __func__, img_request);
2216 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2217 rbd_img_obj_request_del(img_request, obj_request);
2218 rbd_assert(img_request->obj_request_count == 0);
2220 if (img_request_layered_test(img_request)) {
2221 img_request_layered_clear(img_request);
2222 rbd_dev_parent_put(img_request->rbd_dev);
2225 if (img_request_write_test(img_request) ||
2226 img_request_discard_test(img_request))
2227 ceph_put_snap_context(img_request->snapc);
2229 kmem_cache_free(rbd_img_request_cache, img_request);
2232 static struct rbd_img_request *rbd_parent_request_create(
2233 struct rbd_obj_request *obj_request,
2234 u64 img_offset, u64 length)
2236 struct rbd_img_request *parent_request;
2237 struct rbd_device *rbd_dev;
2239 rbd_assert(obj_request->img_request);
2240 rbd_dev = obj_request->img_request->rbd_dev;
2242 parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
2243 length, OBJ_OP_READ, NULL);
2244 if (!parent_request)
2247 img_request_child_set(parent_request);
2248 rbd_obj_request_get(obj_request);
2249 parent_request->obj_request = obj_request;
2251 return parent_request;
2254 static void rbd_parent_request_destroy(struct kref *kref)
2256 struct rbd_img_request *parent_request;
2257 struct rbd_obj_request *orig_request;
2259 parent_request = container_of(kref, struct rbd_img_request, kref);
2260 orig_request = parent_request->obj_request;
2262 parent_request->obj_request = NULL;
2263 rbd_obj_request_put(orig_request);
2264 img_request_child_clear(parent_request);
2266 rbd_img_request_destroy(kref);
2269 static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
2271 struct rbd_img_request *img_request;
2272 unsigned int xferred;
2276 rbd_assert(obj_request_img_data_test(obj_request));
2277 img_request = obj_request->img_request;
2279 rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
2280 xferred = (unsigned int)obj_request->xferred;
2281 result = obj_request->result;
2283 struct rbd_device *rbd_dev = img_request->rbd_dev;
2284 enum obj_operation_type op_type;
2286 if (img_request_discard_test(img_request))
2287 op_type = OBJ_OP_DISCARD;
2288 else if (img_request_write_test(img_request))
2289 op_type = OBJ_OP_WRITE;
2291 op_type = OBJ_OP_READ;
2293 rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
2294 obj_op_name(op_type), obj_request->length,
2295 obj_request->img_offset, obj_request->offset);
2296 rbd_warn(rbd_dev, " result %d xferred %x",
2297 result, xferred);
2298 if (!img_request->result)
2299 img_request->result = result;
2301 * Need to end I/O on the entire obj_request worth of
2302 * bytes in case of error.
2304 xferred = obj_request->length;
2307 /* Image object requests don't own their page array */
2309 if (obj_request->type == OBJ_REQUEST_PAGES) {
2310 obj_request->pages = NULL;
2311 obj_request->page_count = 0;
2314 if (img_request_child_test(img_request)) {
2315 rbd_assert(img_request->obj_request != NULL);
2316 more = obj_request->which < img_request->obj_request_count - 1;
2318 rbd_assert(img_request->rq != NULL);
2320 more = blk_update_request(img_request->rq, result, xferred);
2322 __blk_mq_end_request(img_request->rq, result);
2328 static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
2330 struct rbd_img_request *img_request;
2331 u32 which = obj_request->which;
2334 rbd_assert(obj_request_img_data_test(obj_request));
2335 img_request = obj_request->img_request;
2337 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
2338 rbd_assert(img_request != NULL);
2339 rbd_assert(img_request->obj_request_count > 0);
2340 rbd_assert(which != BAD_WHICH);
2341 rbd_assert(which < img_request->obj_request_count);
2343 spin_lock_irq(&img_request->completion_lock);
2344 if (which != img_request->next_completion)
2347 for_each_obj_request_from(img_request, obj_request) {
2349 rbd_assert(which < img_request->obj_request_count);
2351 if (!obj_request_done_test(obj_request))
2353 more = rbd_img_obj_end_request(obj_request);
2357 rbd_assert(more ^ (which == img_request->obj_request_count));
2358 img_request->next_completion = which;
2360 spin_unlock_irq(&img_request->completion_lock);
2361 rbd_img_request_put(img_request);
2364 rbd_img_request_complete(img_request);
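/*
 * A stand-alone user-space sketch (not driver code; names are local to
 * this example) of the bookkeeping above: object requests may finish
 * out of order, but next_completion only advances over a contiguous
 * prefix of finished requests, so image-level completion stays ordered.
 */
#include <stdbool.h>

#define SKETCH_NREQ 8

struct sketch_img {
	bool done[SKETCH_NREQ];
	unsigned int next_completion;
	unsigned int count;
};

/* Mark @which finished; return how many requests are now fully ended. */
static unsigned int sketch_obj_done(struct sketch_img *img, unsigned int which)
{
	img->done[which] = true;
	if (which != img->next_completion)
		return img->next_completion;	/* a predecessor is in flight */
	while (img->next_completion < img->count &&
	       img->done[img->next_completion])
		img->next_completion++;		/* end I/O on this prefix */
	return img->next_completion;
}

/*
 * With count == 3: sketch_obj_done(..., 1) leaves next_completion at 0;
 * sketch_obj_done(..., 0) then advances it past both 0 and 1.
 */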
2368 * Add individual osd ops to the given ceph_osd_request and prepare
2369 * them for submission. num_ops is the current number of
2370 * osd operations already added to the object request.
2372 static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
2373 struct ceph_osd_request *osd_request,
2374 enum obj_operation_type op_type,
2375 unsigned int num_ops)
2377 struct rbd_img_request *img_request = obj_request->img_request;
2378 struct rbd_device *rbd_dev = img_request->rbd_dev;
2379 u64 object_size = rbd_obj_bytes(&rbd_dev->header);
2380 u64 offset = obj_request->offset;
2381 u64 length = obj_request->length;
2385 if (op_type == OBJ_OP_DISCARD) {
2386 if (!offset && length == object_size &&
2387 (!img_request_layered_test(img_request) ||
2388 !obj_request_overlaps_parent(obj_request))) {
2389 opcode = CEPH_OSD_OP_DELETE;
2390 } else if (offset + length == object_size) {
2391 opcode = CEPH_OSD_OP_TRUNCATE;
2393 down_read(&rbd_dev->header_rwsem);
2394 img_end = rbd_dev->header.image_size;
2395 up_read(&rbd_dev->header_rwsem);
2397 if (obj_request->img_offset + length == img_end)
2398 opcode = CEPH_OSD_OP_TRUNCATE;
2400 opcode = CEPH_OSD_OP_ZERO;
2402 } else if (op_type == OBJ_OP_WRITE) {
2403 if (!offset && length == object_size)
2404 opcode = CEPH_OSD_OP_WRITEFULL;
2406 opcode = CEPH_OSD_OP_WRITE;
2407 osd_req_op_alloc_hint_init(osd_request, num_ops,
2408 object_size, object_size);
2411 opcode = CEPH_OSD_OP_READ;
2414 if (opcode == CEPH_OSD_OP_DELETE)
2415 osd_req_op_init(osd_request, num_ops, opcode, 0);
2417 osd_req_op_extent_init(osd_request, num_ops, opcode,
2418 offset, length, 0, 0);
2420 if (obj_request->type == OBJ_REQUEST_BIO)
2421 osd_req_op_extent_osd_data_bio(osd_request, num_ops,
2422 obj_request->bio_list, length);
2423 else if (obj_request->type == OBJ_REQUEST_PAGES)
2424 osd_req_op_extent_osd_data_pages(osd_request, num_ops,
2425 obj_request->pages, length,
2426 offset & ~PAGE_MASK, false, false);
2428 /* Discards are also writes */
2429 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
2430 rbd_osd_req_format_write(obj_request);
2432 rbd_osd_req_format_read(obj_request);
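/*
 * The discard opcode choice above, restated as a stand-alone sketch
 * (helper name and flattened arguments are local to this example, not
 * driver code): a discard covering a whole object that shares nothing
 * with a parent is a DELETE, one running to the end of the object or
 * of the image is a TRUNCATE, and everything else is a ZERO.
 */
#include <stdint.h>

enum sketch_discard_op { DISCARD_DELETE, DISCARD_TRUNCATE, DISCARD_ZERO };

static enum sketch_discard_op sketch_discard_opcode(uint64_t offset,
			uint64_t length, uint64_t object_size,
			uint64_t img_offset, uint64_t img_end,
			int shares_parent_data)
{
	if (!offset && length == object_size && !shares_parent_data)
		return DISCARD_DELETE;		/* drop the whole object */
	if (offset + length == object_size)
		return DISCARD_TRUNCATE;	/* clip the object's tail */
	if (img_offset + length == img_end)
		return DISCARD_TRUNCATE;	/* clip the image's tail */
	return DISCARD_ZERO;			/* punch a hole */
}

/*
 * e.g. sketch_discard_opcode(0, 4096, 4096, 0, 1 << 20, 0) is a DELETE;
 * the same call with shares_parent_data set falls through to TRUNCATE
 * since offset + length still equals object_size.
 */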
2436 * Split up an image request into one or more object requests, each
2437 * to a different object. The "type" parameter indicates whether
2438 * "data_desc" is the pointer to the head of a list of bio
2439 * structures, or the base of a page array. In either case this
2440 * function assumes data_desc describes memory sufficient to hold
2441 * all data described by the image request.
2443 static int rbd_img_request_fill(struct rbd_img_request *img_request,
2444 enum obj_request_type type,
2447 struct rbd_device *rbd_dev = img_request->rbd_dev;
2448 struct rbd_obj_request *obj_request = NULL;
2449 struct rbd_obj_request *next_obj_request;
2450 struct bio *bio_list = NULL;
2451 unsigned int bio_offset = 0;
2452 struct page **pages = NULL;
2453 enum obj_operation_type op_type;
2457 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2458 (int)type, data_desc);
2460 img_offset = img_request->offset;
2461 resid = img_request->length;
2462 rbd_assert(resid > 0);
2463 op_type = rbd_img_request_op_type(img_request);
2465 if (type == OBJ_REQUEST_BIO) {
2466 bio_list = data_desc;
2467 rbd_assert(img_offset ==
2468 bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
2469 } else if (type == OBJ_REQUEST_PAGES) {
2474 struct ceph_osd_request *osd_req;
2475 const char *object_name;
2479 object_name = rbd_segment_name(rbd_dev, img_offset);
2482 offset = rbd_segment_offset(rbd_dev, img_offset);
2483 length = rbd_segment_length(rbd_dev, img_offset, resid);
2484 obj_request = rbd_obj_request_create(object_name,
2485 offset, length, type);
2486 /* object request has its own copy of the object name */
2487 rbd_segment_name_free(object_name);
2492 * set obj_request->img_request before creating the
2493 * osd_request so that it gets the right snapc
2495 rbd_img_obj_request_add(img_request, obj_request);
2497 if (type == OBJ_REQUEST_BIO) {
2498 unsigned int clone_size;
2500 rbd_assert(length <= (u64)UINT_MAX);
2501 clone_size = (unsigned int)length;
2502 obj_request->bio_list =
2503 bio_chain_clone_range(&bio_list,
2507 if (!obj_request->bio_list)
2509 } else if (type == OBJ_REQUEST_PAGES) {
2510 unsigned int page_count;
2512 obj_request->pages = pages;
2513 page_count = (u32)calc_pages_for(offset, length);
2514 obj_request->page_count = page_count;
2515 if ((offset + length) & ~PAGE_MASK)
2516 page_count--; /* more on last page */
2517 pages += page_count;
2520 osd_req = rbd_osd_req_create(rbd_dev, op_type,
2521 (op_type == OBJ_OP_WRITE) ? 2 : 1,
2526 obj_request->osd_req = osd_req;
2527 obj_request->callback = rbd_img_obj_callback;
2528 obj_request->img_offset = img_offset;
2530 rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);
2532 rbd_img_request_get(img_request);
2534 img_offset += length;
2541 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2542 rbd_img_obj_request_del(img_request, obj_request);
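/*
 * The per-object striping math the loop above relies on
 * (rbd_segment_name/offset/length), as a stand-alone user-space sketch
 * assuming the default layout of one object per (1 << obj_order) bytes
 * and no fancy striping; helper names are local to this example.
 */
#include <stdint.h>

static uint64_t sketch_seg_offset(uint64_t img_offset, unsigned int obj_order)
{
	/* Byte offset within the object containing img_offset. */
	return img_offset & (((uint64_t)1 << obj_order) - 1);
}

static uint64_t sketch_seg_length(uint64_t img_offset, uint64_t resid,
				  unsigned int obj_order)
{
	uint64_t seg_size = (uint64_t)1 << obj_order;
	uint64_t off = img_offset & (seg_size - 1);

	/* Clamp so a single request never crosses an object boundary. */
	return off + resid > seg_size ? seg_size - off : resid;
}

/*
 * e.g. with obj_order == 22 (4 MiB objects), an 8 KiB request starting
 * 4 KiB before an object boundary splits into two 4 KiB pieces:
 * sketch_seg_length(5 * 0x400000 - 0x1000, 0x2000, 22) == 0x1000.
 */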
2548 rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
2550 struct rbd_img_request *img_request;
2551 struct rbd_device *rbd_dev;
2552 struct page **pages;
2555 dout("%s: obj %p\n", __func__, obj_request);
2557 rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
2558 obj_request->type == OBJ_REQUEST_NODATA);
2559 rbd_assert(obj_request_img_data_test(obj_request));
2560 img_request = obj_request->img_request;
2561 rbd_assert(img_request);
2563 rbd_dev = img_request->rbd_dev;
2564 rbd_assert(rbd_dev);
2566 pages = obj_request->copyup_pages;
2567 rbd_assert(pages != NULL);
2568 obj_request->copyup_pages = NULL;
2569 page_count = obj_request->copyup_page_count;
2570 rbd_assert(page_count);
2571 obj_request->copyup_page_count = 0;
2572 ceph_release_page_vector(pages, page_count);
2575 * We want the transfer count to reflect the size of the
2576 * original write request. There is no such thing as a
2577 * successful short write, so if the request was successful
2578 * we can just set it to the originally-requested length.
2580 if (!obj_request->result)
2581 obj_request->xferred = obj_request->length;
2583 obj_request_done_set(obj_request);
2587 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2589 struct rbd_obj_request *orig_request;
2590 struct ceph_osd_request *osd_req;
2591 struct ceph_osd_client *osdc;
2592 struct rbd_device *rbd_dev;
2593 struct page **pages;
2594 enum obj_operation_type op_type;
2599 rbd_assert(img_request_child_test(img_request));
2601 /* First get what we need from the image request */
2603 pages = img_request->copyup_pages;
2604 rbd_assert(pages != NULL);
2605 img_request->copyup_pages = NULL;
2606 page_count = img_request->copyup_page_count;
2607 rbd_assert(page_count);
2608 img_request->copyup_page_count = 0;
2610 orig_request = img_request->obj_request;
2611 rbd_assert(orig_request != NULL);
2612 rbd_assert(obj_request_type_valid(orig_request->type));
2613 img_result = img_request->result;
2614 parent_length = img_request->length;
2615 rbd_assert(parent_length == img_request->xferred);
2616 rbd_img_request_put(img_request);
2618 rbd_assert(orig_request->img_request);
2619 rbd_dev = orig_request->img_request->rbd_dev;
2620 rbd_assert(rbd_dev);
2623 * If the overlap has become 0 (most likely because the
2624 * image has been flattened) we need to free the pages
2625 * and re-submit the original write request.
2627 if (!rbd_dev->parent_overlap) {
2628 struct ceph_osd_client *osdc;
2630 ceph_release_page_vector(pages, page_count);
2631 osdc = &rbd_dev->rbd_client->client->osdc;
2632 img_result = rbd_obj_request_submit(osdc, orig_request);
2641 * The original osd request is of no use to us any more.
2642 * We need a new one that can hold the three ops in a copyup
2643 * request. Allocate the new copyup osd request for the
2644 * original request, and release the old one.
2646 img_result = -ENOMEM;
2647 osd_req = rbd_osd_req_create_copyup(orig_request);
2650 rbd_osd_req_destroy(orig_request->osd_req);
2651 orig_request->osd_req = osd_req;
2652 orig_request->copyup_pages = pages;
2653 orig_request->copyup_page_count = page_count;
2655 /* Initialize the copyup op */
2657 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2658 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2661 /* Add the other op(s) */
2663 op_type = rbd_img_request_op_type(orig_request->img_request);
2664 rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1);
2666 /* All set, send it off. */
2668 osdc = &rbd_dev->rbd_client->client->osdc;
2669 img_result = rbd_obj_request_submit(osdc, orig_request);
2673 /* Record the error code and complete the request */
2675 orig_request->result = img_result;
2676 orig_request->xferred = 0;
2677 obj_request_done_set(orig_request);
2678 rbd_obj_request_complete(orig_request);
2682 * Read from the parent image the range of data that covers the
2683 * entire target of the given object request. This is used for
2684 * satisfying a layered image write request when the target of an
2685 * object request from the image request does not exist.
2687 * A page array big enough to hold the returned data is allocated
2688 * and supplied to rbd_img_request_fill() as the "data descriptor."
2689 * When the read completes, this page array will be transferred to
2690 * the original object request for the copyup operation.
2692 * If an error occurs, record it as the result of the original
2693 * object request and mark it done so it gets completed.
2695 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2697 struct rbd_img_request *img_request = NULL;
2698 struct rbd_img_request *parent_request = NULL;
2699 struct rbd_device *rbd_dev;
2702 struct page **pages = NULL;
2706 rbd_assert(obj_request_img_data_test(obj_request));
2707 rbd_assert(obj_request_type_valid(obj_request->type));
2709 img_request = obj_request->img_request;
2710 rbd_assert(img_request != NULL);
2711 rbd_dev = img_request->rbd_dev;
2712 rbd_assert(rbd_dev->parent != NULL);
2715 * Determine the byte range covered by the object in the
2716 * child image to which the original request was to be sent.
2718 img_offset = obj_request->img_offset - obj_request->offset;
2719 length = (u64)1 << rbd_dev->header.obj_order;
2722 * There is no defined parent data beyond the parent
2723 * overlap, so limit what we read at that boundary if
2724 * necessary.
2726 if (img_offset + length > rbd_dev->parent_overlap) {
2727 rbd_assert(img_offset < rbd_dev->parent_overlap);
2728 length = rbd_dev->parent_overlap - img_offset;
2732 * Allocate a page array big enough to receive the data read
2735 page_count = (u32)calc_pages_for(0, length);
2736 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2737 if (IS_ERR(pages)) {
2738 result = PTR_ERR(pages);
2744 parent_request = rbd_parent_request_create(obj_request,
2745 img_offset, length);
2746 if (!parent_request)
2749 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2752 parent_request->copyup_pages = pages;
2753 parent_request->copyup_page_count = page_count;
2755 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2756 result = rbd_img_request_submit(parent_request);
2760 parent_request->copyup_pages = NULL;
2761 parent_request->copyup_page_count = 0;
2762 parent_request->obj_request = NULL;
2763 rbd_obj_request_put(obj_request);
2766 ceph_release_page_vector(pages, page_count);
2768 rbd_img_request_put(parent_request);
2769 obj_request->result = result;
2770 obj_request->xferred = 0;
2771 obj_request_done_set(obj_request);
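/*
 * The overlap clamp above in isolation (stand-alone sketch, local
 * names): parent data is only defined for the first parent_overlap
 * bytes of the child image, so a parent read that would end past that
 * boundary is shortened to stop exactly at it.
 */
#include <stdint.h>
#include <assert.h>

static uint64_t sketch_clamp_to_overlap(uint64_t img_offset, uint64_t length,
					uint64_t overlap)
{
	assert(img_offset < overlap);	/* caller ensured the object overlaps */
	return img_offset + length > overlap ? overlap - img_offset : length;
}

/* e.g. sketch_clamp_to_overlap(3 << 20, 4 << 20, 4 << 20) == 1 << 20. */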
2776 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2778 struct rbd_obj_request *orig_request;
2779 struct rbd_device *rbd_dev;
2782 rbd_assert(!obj_request_img_data_test(obj_request));
2785 * All we need from the object request is the original
2786 * request and the result of the STAT op. Grab those, then
2787 * we're done with the request.
2789 orig_request = obj_request->obj_request;
2790 obj_request->obj_request = NULL;
2791 rbd_obj_request_put(orig_request);
2792 rbd_assert(orig_request);
2793 rbd_assert(orig_request->img_request);
2795 result = obj_request->result;
2796 obj_request->result = 0;
2798 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2799 obj_request, orig_request, result,
2800 obj_request->xferred, obj_request->length);
2801 rbd_obj_request_put(obj_request);
2804 * If the overlap has become 0 (most likely because the
2805 * image has been flattened) we need to free the pages
2806 * and re-submit the original write request.
2808 rbd_dev = orig_request->img_request->rbd_dev;
2809 if (!rbd_dev->parent_overlap) {
2810 struct ceph_osd_client *osdc;
2812 osdc = &rbd_dev->rbd_client->client->osdc;
2813 result = rbd_obj_request_submit(osdc, orig_request);
2819 * Our only purpose here is to determine whether the object
2820 * exists, and we don't want to treat the non-existence as
2821 * an error. If something else comes back, transfer the
2822 * error to the original request and complete it now.
2825 obj_request_existence_set(orig_request, true);
2826 } else if (result == -ENOENT) {
2827 obj_request_existence_set(orig_request, false);
2828 } else if (result) {
2829 orig_request->result = result;
2834 * Resubmit the original request now that we have recorded
2835 * whether the target object exists.
2837 orig_request->result = rbd_img_obj_request_submit(orig_request);
2839 if (orig_request->result)
2840 rbd_obj_request_complete(orig_request);
2843 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2845 struct rbd_obj_request *stat_request;
2846 struct rbd_device *rbd_dev;
2847 struct ceph_osd_client *osdc;
2848 struct page **pages = NULL;
2854 * The response data for a STAT call consists of:
2855 *     le64 length;
2856 *     struct {
2857 *         le32 tv_sec;
2858 *         le32 tv_nsec;
2859 *     } mtime;
2861 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2862 page_count = (u32)calc_pages_for(0, size);
2863 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2865 return PTR_ERR(pages);
2868 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2873 rbd_obj_request_get(obj_request);
2874 stat_request->obj_request = obj_request;
2875 stat_request->pages = pages;
2876 stat_request->page_count = page_count;
2878 rbd_assert(obj_request->img_request);
2879 rbd_dev = obj_request->img_request->rbd_dev;
2880 stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
2882 if (!stat_request->osd_req)
2884 stat_request->callback = rbd_img_obj_exists_callback;
2886 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0);
2887 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2889 rbd_osd_req_format_read(stat_request);
2891 osdc = &rbd_dev->rbd_client->client->osdc;
2892 ret = rbd_obj_request_submit(osdc, stat_request);
2895 rbd_obj_request_put(obj_request);
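/*
 * Buffer sizing for the STAT reply above, with a user-space stand-in
 * for the kernel's calc_pages_for() (local name, same arithmetic):
 * the number of pages spanned by the byte range [off, off + len).
 */
#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096ULL

static uint32_t sketch_calc_pages_for(uint64_t off, uint64_t len)
{
	return (uint32_t)((off + len + SKETCH_PAGE_SIZE - 1) / SKETCH_PAGE_SIZE -
			  off / SKETCH_PAGE_SIZE);
}

/*
 * The 16-byte STAT reply (le64 length + le32/le32 mtime) therefore
 * always fits in one page: sketch_calc_pages_for(0, 16) == 1.
 */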
2900 static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
2902 struct rbd_img_request *img_request;
2903 struct rbd_device *rbd_dev;
2905 rbd_assert(obj_request_img_data_test(obj_request));
2907 img_request = obj_request->img_request;
2908 rbd_assert(img_request);
2909 rbd_dev = img_request->rbd_dev;
2912 if (!img_request_write_test(img_request) &&
2913 !img_request_discard_test(img_request))
2916 /* Non-layered writes */
2917 if (!img_request_layered_test(img_request))
2921 * Layered writes outside of the parent overlap range don't
2922 * share any data with the parent.
2924 if (!obj_request_overlaps_parent(obj_request))
2928 * Entire-object layered writes - we will overwrite whatever
2929 * parent data there is anyway.
2931 if (!obj_request->offset &&
2932 obj_request->length == rbd_obj_bytes(&rbd_dev->header))
2936 * If the object is known to already exist, its parent data has
2937 * already been copied.
2939 if (obj_request_known_test(obj_request) &&
2940 obj_request_exists_test(obj_request))
2946 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2948 if (img_obj_request_simple(obj_request)) {
2949 struct rbd_device *rbd_dev;
2950 struct ceph_osd_client *osdc;
2952 rbd_dev = obj_request->img_request->rbd_dev;
2953 osdc = &rbd_dev->rbd_client->client->osdc;
2955 return rbd_obj_request_submit(osdc, obj_request);
2959 * It's a layered write. The target object might exist but
2960 * we may not know that yet. If we know it doesn't exist,
2961 * start by reading the data for the full target object from
2962 * the parent so we can use it for a copyup to the target.
2964 if (obj_request_known_test(obj_request))
2965 return rbd_img_obj_parent_read_full(obj_request);
2967 /* We don't know whether the target exists. Go find out. */
2969 return rbd_img_obj_exists_submit(obj_request);
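/*
 * The routing decision made by the two functions above, condensed into
 * a stand-alone predicate (field names abbreviated for this sketch,
 * not driver structures): an object request goes straight to the OSD
 * unless it is a layered write that may depend on parent data whose
 * copyup status is not yet known.
 */
#include <stdbool.h>

struct sketch_state {
	bool is_write_or_discard;
	bool layered;
	bool overlaps_parent;
	bool whole_object;
	bool known_to_exist;
};

static bool sketch_submit_directly(const struct sketch_state *s)
{
	if (!s->is_write_or_discard)
		return true;	/* reads never need copyup */
	if (!s->layered)
		return true;	/* no parent to copy from */
	if (!s->overlaps_parent)
		return true;	/* past the overlap: nothing shared */
	if (s->whole_object)
		return true;	/* full overwrite: parent data is moot */
	return s->known_to_exist;	/* existing target was copied up */
}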
2972 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2974 struct rbd_obj_request *obj_request;
2975 struct rbd_obj_request *next_obj_request;
2978 dout("%s: img %p\n", __func__, img_request);
2980 rbd_img_request_get(img_request);
2981 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2982 ret = rbd_img_obj_request_submit(obj_request);
2988 rbd_img_request_put(img_request);
2992 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2994 struct rbd_obj_request *obj_request;
2995 struct rbd_device *rbd_dev;
3000 rbd_assert(img_request_child_test(img_request));
3002 /* First get what we need from the image request and release it */
3004 obj_request = img_request->obj_request;
3005 img_xferred = img_request->xferred;
3006 img_result = img_request->result;
3007 rbd_img_request_put(img_request);
3010 * If the overlap has become 0 (most likely because the
3011 * image has been flattened) we need to re-submit the
3012 * original request.
3014 rbd_assert(obj_request);
3015 rbd_assert(obj_request->img_request);
3016 rbd_dev = obj_request->img_request->rbd_dev;
3017 if (!rbd_dev->parent_overlap) {
3018 struct ceph_osd_client *osdc;
3020 osdc = &rbd_dev->rbd_client->client->osdc;
3021 img_result = rbd_obj_request_submit(osdc, obj_request);
3026 obj_request->result = img_result;
3027 if (obj_request->result)
3031 * We need to zero anything beyond the parent overlap
3032 * boundary. Since rbd_img_obj_request_read_callback()
3033 * will zero anything beyond the end of a short read, an
3034 * easy way to do this is to pretend the data from the
3035 * parent came up short--ending at the overlap boundary.
3037 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
3038 obj_end = obj_request->img_offset + obj_request->length;
3039 if (obj_end > rbd_dev->parent_overlap) {
3042 if (obj_request->img_offset < rbd_dev->parent_overlap)
3043 xferred = rbd_dev->parent_overlap -
3044 obj_request->img_offset;
3046 obj_request->xferred = min(img_xferred, xferred);
3048 obj_request->xferred = img_xferred;
3051 rbd_img_obj_request_read_callback(obj_request);
3052 rbd_obj_request_complete(obj_request);
3055 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
3057 struct rbd_img_request *img_request;
3060 rbd_assert(obj_request_img_data_test(obj_request));
3061 rbd_assert(obj_request->img_request != NULL);
3062 rbd_assert(obj_request->result == (s32) -ENOENT);
3063 rbd_assert(obj_request_type_valid(obj_request->type));
3065 /* rbd_read_finish(obj_request, obj_request->length); */
3066 img_request = rbd_parent_request_create(obj_request,
3067 obj_request->img_offset,
3068 obj_request->length);
3073 if (obj_request->type == OBJ_REQUEST_BIO)
3074 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3075 obj_request->bio_list);
3077 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
3078 obj_request->pages);
3082 img_request->callback = rbd_img_parent_read_callback;
3083 result = rbd_img_request_submit(img_request);
3090 rbd_img_request_put(img_request);
3091 obj_request->result = result;
3092 obj_request->xferred = 0;
3093 obj_request_done_set(obj_request);
3096 static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
3098 struct rbd_obj_request *obj_request;
3099 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3102 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
3103 OBJ_REQUEST_NODATA);
3108 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
3110 if (!obj_request->osd_req)
3113 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
3115 rbd_osd_req_format_read(obj_request);
3117 ret = rbd_obj_request_submit(osdc, obj_request);
3120 ret = rbd_obj_request_wait(obj_request);
3122 rbd_obj_request_put(obj_request);
3127 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
3129 struct rbd_device *rbd_dev = (struct rbd_device *)data;
3132 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
3133 rbd_dev->header_name, (unsigned long long)notify_id,
3134 (unsigned int)opcode);
3137 * Until adequate refresh error handling is in place, there is
3138 * not much we can do here, except warn.
3140 * See http://tracker.ceph.com/issues/5040
3142 ret = rbd_dev_refresh(rbd_dev);
3144 rbd_warn(rbd_dev, "refresh failed: %d", ret);
3146 ret = rbd_obj_notify_ack_sync(rbd_dev, notify_id);
3148 rbd_warn(rbd_dev, "notify_ack ret %d", ret);
3152 * Send an (un)watch request and wait for the ack. Return a request
3153 * with a ref held on success, or an ERR_PTR on error.
3155 static struct rbd_obj_request *rbd_obj_watch_request_helper(
3156 struct rbd_device *rbd_dev,
3159 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3160 struct ceph_options *opts = osdc->client->options;
3161 struct rbd_obj_request *obj_request;
3164 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
3165 OBJ_REQUEST_NODATA);
3167 return ERR_PTR(-ENOMEM);
3169 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_WRITE, 1,
3171 if (!obj_request->osd_req) {
3176 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
3177 rbd_dev->watch_event->cookie, 0, watch);
3178 rbd_osd_req_format_write(obj_request);
3181 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
3183 ret = rbd_obj_request_submit(osdc, obj_request);
3187 ret = rbd_obj_request_wait_timeout(obj_request, opts->mount_timeout);
3191 ret = obj_request->result;
3194 rbd_obj_request_end(obj_request);
3201 rbd_obj_request_put(obj_request);
3202 return ERR_PTR(ret);
3206 * Initiate a watch request, synchronously.
3208 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
3210 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3211 struct rbd_obj_request *obj_request;
3214 rbd_assert(!rbd_dev->watch_event);
3215 rbd_assert(!rbd_dev->watch_request);
3217 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
3218 &rbd_dev->watch_event);
3222 obj_request = rbd_obj_watch_request_helper(rbd_dev, true);
3223 if (IS_ERR(obj_request)) {
3224 ceph_osdc_cancel_event(rbd_dev->watch_event);
3225 rbd_dev->watch_event = NULL;
3226 return PTR_ERR(obj_request);
3230 * A watch request is set to linger, so the underlying osd
3231 * request won't go away until we unregister it. We retain
3232 * a pointer to the object request during that time (in
3233 * rbd_dev->watch_request), so we'll keep a reference to it.
3234 * We'll drop that reference after we've unregistered it in
3235 * rbd_dev_header_unwatch_sync().
3237 rbd_dev->watch_request = obj_request;
3243 * Tear down a watch request, synchronously.
3245 static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
3247 struct rbd_obj_request *obj_request;
3249 rbd_assert(rbd_dev->watch_event);
3250 rbd_assert(rbd_dev->watch_request);
3252 rbd_obj_request_end(rbd_dev->watch_request);
3253 rbd_obj_request_put(rbd_dev->watch_request);
3254 rbd_dev->watch_request = NULL;
3256 obj_request = rbd_obj_watch_request_helper(rbd_dev, false);
3257 if (!IS_ERR(obj_request))
3258 rbd_obj_request_put(obj_request);
3260 rbd_warn(rbd_dev, "unable to tear down watch request (%ld)",
3261 PTR_ERR(obj_request));
3263 ceph_osdc_cancel_event(rbd_dev->watch_event);
3264 rbd_dev->watch_event = NULL;
3266 dout("%s flushing notifies\n", __func__);
3267 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
3271 * Synchronous osd object method call. Returns the number of bytes
3272 * returned in the inbound buffer, or a negative error code.
3274 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3275 const char *object_name,
3276 const char *class_name,
3277 const char *method_name,
3278 const void *outbound,
3279 size_t outbound_size,
3281 size_t inbound_size)
3283 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3284 struct rbd_obj_request *obj_request;
3285 struct page **pages;
3290 * Method calls are ultimately read operations. The result
3291 * should be placed into the inbound buffer provided. They
3292 * also supply outbound data--parameters for the object
3293 * method. Currently if this is present it will be a
3294 * snapshot id.
3296 page_count = (u32)calc_pages_for(0, inbound_size);
3297 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3299 return PTR_ERR(pages);
3302 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
3307 obj_request->pages = pages;
3308 obj_request->page_count = page_count;
3310 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
3312 if (!obj_request->osd_req)
3315 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
3316 class_name, method_name);
3317 if (outbound_size) {
3318 struct ceph_pagelist *pagelist;
3320 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
3324 ceph_pagelist_init(pagelist);
3325 ceph_pagelist_append(pagelist, outbound, outbound_size);
3326 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
3329 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
3330 obj_request->pages, inbound_size,
3332 rbd_osd_req_format_read(obj_request);
3334 ret = rbd_obj_request_submit(osdc, obj_request);
3337 ret = rbd_obj_request_wait(obj_request);
3341 ret = obj_request->result;
3345 rbd_assert(obj_request->xferred < (u64)INT_MAX);
3346 ret = (int)obj_request->xferred;
3347 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
3350 rbd_obj_request_put(obj_request);
3352 ceph_release_page_vector(pages, page_count);
3357 static void rbd_queue_workfn(struct work_struct *work)
3359 struct request *rq = blk_mq_rq_from_pdu(work);
3360 struct rbd_device *rbd_dev = rq->q->queuedata;
3361 struct rbd_img_request *img_request;
3362 struct ceph_snap_context *snapc = NULL;
3363 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
3364 u64 length = blk_rq_bytes(rq);
3365 enum obj_operation_type op_type;
3369 if (rq->cmd_type != REQ_TYPE_FS) {
3370 dout("%s: non-fs request type %d\n", __func__,
3371 (int) rq->cmd_type);
3376 if (rq->cmd_flags & REQ_DISCARD)
3377 op_type = OBJ_OP_DISCARD;
3378 else if (rq->cmd_flags & REQ_WRITE)
3379 op_type = OBJ_OP_WRITE;
3381 op_type = OBJ_OP_READ;
3383 /* Ignore/skip any zero-length requests */
3386 dout("%s: zero-length request\n", __func__);
3391 /* Only reads are allowed to a read-only device */
3393 if (op_type != OBJ_OP_READ) {
3394 if (rbd_dev->mapping.read_only) {
3398 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3402 * Quit early if the mapped snapshot no longer exists. It's
3403 * still possible the snapshot will have disappeared by the
3404 * time our request arrives at the osd, but there's no sense in
3405 * sending it if we already know.
3407 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3408 dout("request for non-existent snapshot");
3409 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3414 if (offset && length > U64_MAX - offset + 1) {
3415 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
3418 goto err_rq; /* Shouldn't happen */
3421 blk_mq_start_request(rq);
3423 down_read(&rbd_dev->header_rwsem);
3424 mapping_size = rbd_dev->mapping.size;
3425 if (op_type != OBJ_OP_READ) {
3426 snapc = rbd_dev->header.snapc;
3427 ceph_get_snap_context(snapc);
3429 up_read(&rbd_dev->header_rwsem);
3431 if (offset + length > mapping_size) {
3432 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
3433 length, mapping_size);
3438 img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
3444 img_request->rq = rq;
3445 snapc = NULL; /* img_request consumes a ref */
3447 if (op_type == OBJ_OP_DISCARD)
3448 result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
3451 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3454 goto err_img_request;
3456 result = rbd_img_request_submit(img_request);
3458 goto err_img_request;
3463 rbd_img_request_put(img_request);
3466 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
3467 obj_op_name(op_type), length, offset, result);
3468 ceph_put_snap_context(snapc);
3470 blk_mq_end_request(rq, result);
3473 static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
3474 const struct blk_mq_queue_data *bd)
3476 struct request *rq = bd->rq;
3477 struct work_struct *work = blk_mq_rq_to_pdu(rq);
3479 queue_work(rbd_wq, work);
3480 return BLK_MQ_RQ_QUEUE_OK;
3483 static void rbd_free_disk(struct rbd_device *rbd_dev)
3485 struct gendisk *disk = rbd_dev->disk;
3490 rbd_dev->disk = NULL;
3491 if (disk->flags & GENHD_FL_UP) {
3494 blk_cleanup_queue(disk->queue);
3495 blk_mq_free_tag_set(&rbd_dev->tag_set);
3500 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3501 const char *object_name,
3502 u64 offset, u64 length, void *buf)
3505 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3506 struct rbd_obj_request *obj_request;
3507 struct page **pages = NULL;
3512 page_count = (u32) calc_pages_for(offset, length);
3513 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3515 return PTR_ERR(pages);
3518 obj_request = rbd_obj_request_create(object_name, offset, length,
3523 obj_request->pages = pages;
3524 obj_request->page_count = page_count;
3526 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
3528 if (!obj_request->osd_req)
3531 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3532 offset, length, 0, 0);
3533 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
3535 obj_request->length,
3536 obj_request->offset & ~PAGE_MASK,
3538 rbd_osd_req_format_read(obj_request);
3540 ret = rbd_obj_request_submit(osdc, obj_request);
3543 ret = rbd_obj_request_wait(obj_request);
3547 ret = obj_request->result;
3551 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3552 size = (size_t) obj_request->xferred;
3553 ceph_copy_from_page_vector(pages, buf, 0, size);
3554 rbd_assert(size <= (size_t)INT_MAX);
3558 rbd_obj_request_put(obj_request);
3560 ceph_release_page_vector(pages, page_count);
3566 * Read the complete header for the given rbd device. On successful
3567 * return, the rbd_dev->header field will contain up-to-date
3568 * information about the image.
3570 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3572 struct rbd_image_header_ondisk *ondisk = NULL;
3579 * The complete header will include an array of its 64-bit
3580 * snapshot ids, followed by the names of those snapshots as
3581 * a contiguous block of NUL-terminated strings. Note that
3582 * the number of snapshots could change by the time we read
3583 * it in, in which case we re-read it.
3590 size = sizeof (*ondisk);
3591 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3593 ondisk = kmalloc(size, GFP_KERNEL);
3597 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
3601 if ((size_t)ret < size) {
3603 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3607 if (!rbd_dev_ondisk_valid(ondisk)) {
3609 rbd_warn(rbd_dev, "invalid header");
3613 names_size = le64_to_cpu(ondisk->snap_names_len);
3614 want_count = snap_count;
3615 snap_count = le32_to_cpu(ondisk->snap_count);
3616 } while (snap_count != want_count);
3618 ret = rbd_header_from_disk(rbd_dev, ondisk);
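/*
 * The do/while above is a size-then-reread pattern; a stand-alone
 * user-space restatement (sketch_read_header() stands in for the
 * synchronous OSD read and simply reports the live snapshot count;
 * the record sizes are illustrative):
 */
#include <stdlib.h>

static unsigned int sketch_live_snap_count;	/* pretend cluster state */

static unsigned int sketch_read_header(void *buf, size_t size)
{
	(void)buf;
	(void)size;
	return sketch_live_snap_count;
}

static void *sketch_fetch_whole_header(void)
{
	unsigned int snap_count = 0, want_count;
	void *ondisk = NULL;

	do {
		/* Size the buffer for the count seen on the previous pass. */
		size_t size = 64 + snap_count * 16;
		void *bigger = realloc(ondisk, size);

		if (!bigger) {
			free(ondisk);
			return NULL;
		}
		ondisk = bigger;
		want_count = snap_count;
		snap_count = sketch_read_header(ondisk, size);
	} while (snap_count != want_count);	/* raced with a new snap? */

	return ondisk;
}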
3626 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3627 * has disappeared from the (just updated) snapshot context.
3629 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3633 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3636 snap_id = rbd_dev->spec->snap_id;
3637 if (snap_id == CEPH_NOSNAP)
3640 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3641 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3644 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3649 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
3650 * try to update its size. If REMOVING is set, updating size
3651 * is just useless work since the device can't be opened.
3653 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
3654 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
3655 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3656 dout("setting size to %llu sectors", (unsigned long long)size);
3657 set_capacity(rbd_dev->disk, size);
3658 revalidate_disk(rbd_dev->disk);
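/*
 * set_capacity() above counts 512-byte sectors regardless of the
 * device's logical block size; the conversion in plain arithmetic
 * (stand-alone sketch):
 */
#include <stdint.h>

static uint64_t sketch_bytes_to_sectors(uint64_t bytes)
{
	return bytes >> 9;	/* SECTOR_SHIFT == 9, SECTOR_SIZE == 512 */
}

/* e.g. a 1 GiB mapping is sketch_bytes_to_sectors(1ULL << 30) == 2097152. */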
3662 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3667 down_write(&rbd_dev->header_rwsem);
3668 mapping_size = rbd_dev->mapping.size;
3670 ret = rbd_dev_header_info(rbd_dev);
3675 * If there is a parent, see if it has disappeared due to the
3676 * mapped image getting flattened.
3678 if (rbd_dev->parent) {
3679 ret = rbd_dev_v2_parent_info(rbd_dev);
3684 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
3685 rbd_dev->mapping.size = rbd_dev->header.image_size;
3687 /* validate mapped snapshot's EXISTS flag */
3688 rbd_exists_validate(rbd_dev);
3692 up_write(&rbd_dev->header_rwsem);
3693 if (!ret && mapping_size != rbd_dev->mapping.size)
3694 rbd_dev_update_size(rbd_dev);
3699 static int rbd_init_request(void *data, struct request *rq,
3700 unsigned int hctx_idx, unsigned int request_idx,
3701 unsigned int numa_node)
3703 struct work_struct *work = blk_mq_rq_to_pdu(rq);
3705 INIT_WORK(work, rbd_queue_workfn);
3709 static struct blk_mq_ops rbd_mq_ops = {
3710 .queue_rq = rbd_queue_rq,
3711 .map_queue = blk_mq_map_queue,
3712 .init_request = rbd_init_request,
3715 static int rbd_init_disk(struct rbd_device *rbd_dev)
3717 struct gendisk *disk;
3718 struct request_queue *q;
3722 /* create gendisk info */
3723 disk = alloc_disk(single_major ?
3724 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
3725 RBD_MINORS_PER_MAJOR);
3729 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3730 rbd_dev->dev_id);
3731 disk->major = rbd_dev->major;
3732 disk->first_minor = rbd_dev->minor;
3734 disk->flags |= GENHD_FL_EXT_DEVT;
3735 disk->fops = &rbd_bd_ops;
3736 disk->private_data = rbd_dev;
3738 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
3739 rbd_dev->tag_set.ops = &rbd_mq_ops;
3740 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
3741 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
3742 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
3743 rbd_dev->tag_set.nr_hw_queues = 1;
3744 rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
3746 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
3750 q = blk_mq_init_queue(&rbd_dev->tag_set);
3756 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
3757 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
3759 /* set io sizes to object size */
3760 segment_size = rbd_obj_bytes(&rbd_dev->header);
3761 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3762 q->limits.max_sectors = queue_max_hw_sectors(q);
3763 blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
3764 blk_queue_max_segment_size(q, segment_size);
3765 blk_queue_io_min(q, segment_size);
3766 blk_queue_io_opt(q, segment_size);
3768 /* enable the discard support */
3769 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
3770 q->limits.discard_granularity = segment_size;
3771 q->limits.discard_alignment = segment_size;
3772 blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
3773 q->limits.discard_zeroes_data = 1;
3775 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
3776 q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
3780 q->queuedata = rbd_dev;
3782 rbd_dev->disk = disk;
3786 blk_mq_free_tag_set(&rbd_dev->tag_set);
3796 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3798 return container_of(dev, struct rbd_device, dev);
3801 static ssize_t rbd_size_show(struct device *dev,
3802 struct device_attribute *attr, char *buf)
3804 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3806 return sprintf(buf, "%llu\n",
3807 (unsigned long long)rbd_dev->mapping.size);
3811 * Note this shows the features for whatever's mapped, which is not
3812 * necessarily the base image.
3814 static ssize_t rbd_features_show(struct device *dev,
3815 struct device_attribute *attr, char *buf)
3817 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3819 return sprintf(buf, "0x%016llx\n",
3820 (unsigned long long)rbd_dev->mapping.features);
3823 static ssize_t rbd_major_show(struct device *dev,
3824 struct device_attribute *attr, char *buf)
3826 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3829 return sprintf(buf, "%d\n", rbd_dev->major);
3831 return sprintf(buf, "(none)\n");
3834 static ssize_t rbd_minor_show(struct device *dev,
3835 struct device_attribute *attr, char *buf)
3837 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3839 return sprintf(buf, "%d\n", rbd_dev->minor);
3842 static ssize_t rbd_client_id_show(struct device *dev,
3843 struct device_attribute *attr, char *buf)
3845 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3847 return sprintf(buf, "client%lld\n",
3848 ceph_client_id(rbd_dev->rbd_client->client));
3851 static ssize_t rbd_pool_show(struct device *dev,
3852 struct device_attribute *attr, char *buf)
3854 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3856 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3859 static ssize_t rbd_pool_id_show(struct device *dev,
3860 struct device_attribute *attr, char *buf)
3862 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3864 return sprintf(buf, "%llu\n",
3865 (unsigned long long) rbd_dev->spec->pool_id);
3868 static ssize_t rbd_name_show(struct device *dev,
3869 struct device_attribute *attr, char *buf)
3871 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3873 if (rbd_dev->spec->image_name)
3874 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3876 return sprintf(buf, "(unknown)\n");
3879 static ssize_t rbd_image_id_show(struct device *dev,
3880 struct device_attribute *attr, char *buf)
3882 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3884 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3888 * Shows the name of the currently-mapped snapshot (or
3889 * RBD_SNAP_HEAD_NAME for the base image).
3891 static ssize_t rbd_snap_show(struct device *dev,
3892 struct device_attribute *attr,
3895 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3897 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3901 * For a v2 image, shows the chain of parent images, separated by empty
3902 * lines. For v1 images or if there is no parent, shows "(no parent
3903 * image)".
3905 static ssize_t rbd_parent_show(struct device *dev,
3906 struct device_attribute *attr,
3909 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3912 if (!rbd_dev->parent)
3913 return sprintf(buf, "(no parent image)\n");
3915 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
3916 struct rbd_spec *spec = rbd_dev->parent_spec;
3918 count += sprintf(&buf[count], "%s"
3919 "pool_id %llu\npool_name %s\n"
3920 "image_id %s\nimage_name %s\n"
3921 "snap_id %llu\nsnap_name %s\n"
3923 !count ? "" : "\n", /* first? */
3924 spec->pool_id, spec->pool_name,
3925 spec->image_id, spec->image_name ?: "(unknown)",
3926 spec->snap_id, spec->snap_name,
3927 rbd_dev->parent_overlap);
3933 static ssize_t rbd_image_refresh(struct device *dev,
3934 struct device_attribute *attr,
3938 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3941 ret = rbd_dev_refresh(rbd_dev);
3948 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3949 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3950 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3951 static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
3952 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3953 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3954 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3955 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3956 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3957 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3958 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3959 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3961 static struct attribute *rbd_attrs[] = {
3962 &dev_attr_size.attr,
3963 &dev_attr_features.attr,
3964 &dev_attr_major.attr,
3965 &dev_attr_minor.attr,
3966 &dev_attr_client_id.attr,
3967 &dev_attr_pool.attr,
3968 &dev_attr_pool_id.attr,
3969 &dev_attr_name.attr,
3970 &dev_attr_image_id.attr,
3971 &dev_attr_current_snap.attr,
3972 &dev_attr_parent.attr,
3973 &dev_attr_refresh.attr,
3977 static struct attribute_group rbd_attr_group = {
3981 static const struct attribute_group *rbd_attr_groups[] = {
3986 static void rbd_dev_release(struct device *dev);
3988 static struct device_type rbd_device_type = {
3990 .groups = rbd_attr_groups,
3991 .release = rbd_dev_release,
3994 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3996 kref_get(&spec->kref);
4001 static void rbd_spec_free(struct kref *kref);
4002 static void rbd_spec_put(struct rbd_spec *spec)
4005 kref_put(&spec->kref, rbd_spec_free);
4008 static struct rbd_spec *rbd_spec_alloc(void)
4010 struct rbd_spec *spec;
4012 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
4016 spec->pool_id = CEPH_NOPOOL;
4017 spec->snap_id = CEPH_NOSNAP;
4018 kref_init(&spec->kref);
4023 static void rbd_spec_free(struct kref *kref)
4025 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
4027 kfree(spec->pool_name);
4028 kfree(spec->image_id);
4029 kfree(spec->image_name);
4030 kfree(spec->snap_name);
4034 static void rbd_dev_release(struct device *dev)
4036 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4037 bool need_put = !!rbd_dev->opts;
4039 rbd_put_client(rbd_dev->rbd_client);
4040 rbd_spec_put(rbd_dev->spec);
4041 kfree(rbd_dev->opts);
4045 * This is racy, but way better than putting module removal outside
4046 * of the release callback. The race window is pretty small, so
4047 * doing something similar to dm (dm-builtin.c) is overkill.
4050 module_put(THIS_MODULE);
4053 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
4054 struct rbd_spec *spec,
4055 struct rbd_options *opts)
4057 struct rbd_device *rbd_dev;
4059 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
4063 spin_lock_init(&rbd_dev->lock);
4065 atomic_set(&rbd_dev->parent_ref, 0);
4066 INIT_LIST_HEAD(&rbd_dev->node);
4067 init_rwsem(&rbd_dev->header_rwsem);
4069 rbd_dev->dev.bus = &rbd_bus_type;
4070 rbd_dev->dev.type = &rbd_device_type;
4071 rbd_dev->dev.parent = &rbd_root_dev;
4072 device_initialize(&rbd_dev->dev);
4074 rbd_dev->rbd_client = rbdc;
4075 rbd_dev->spec = spec;
4076 rbd_dev->opts = opts;
4078 /* Initialize the layout used for all rbd requests */
4080 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
4081 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
4082 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
4083 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
4086 * If this is a mapping rbd_dev (as opposed to a parent one),
4087 * pin our module. We have a ref from do_rbd_add(), so use
4088 * __module_get().
4090 if (rbd_dev->opts)
4091 __module_get(THIS_MODULE);
4096 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
4099 put_device(&rbd_dev->dev);
4103 * Get the size and object order for an image snapshot, or if
4104 * snap_id is CEPH_NOSNAP, get this information for the base
4105 * image.
4107 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
4108 u8 *order, u64 *snap_size)
4110 __le64 snapid = cpu_to_le64(snap_id);
4115 } __attribute__ ((packed)) size_buf = { 0 };
4117 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4118 "rbd", "get_size",
4119 &snapid, sizeof (snapid),
4120 &size_buf, sizeof (size_buf));
4121 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4124 if (ret < sizeof (size_buf))
4128 *order = size_buf.order;
4129 dout(" order %u", (unsigned int)*order);
4131 *snap_size = le64_to_cpu(size_buf.size);
4133 dout(" snap_id 0x%016llx snap_size = %llu\n",
4134 (unsigned long long)snap_id,
4135 (unsigned long long)*snap_size);
4140 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
4142 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
4143 &rbd_dev->header.obj_order,
4144 &rbd_dev->header.image_size);
4147 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
4153 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
4157 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4158 "rbd", "get_object_prefix", NULL, 0,
4159 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
4160 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4165 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
4166 p + ret, NULL, GFP_NOIO);
4169 if (IS_ERR(rbd_dev->header.object_prefix)) {
4170 ret = PTR_ERR(rbd_dev->header.object_prefix);
4171 rbd_dev->header.object_prefix = NULL;
4173 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
4181 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
4184 __le64 snapid = cpu_to_le64(snap_id);
4188 } __attribute__ ((packed)) features_buf = { 0 };
4192 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4193 "rbd", "get_features",
4194 &snapid, sizeof (snapid),
4195 &features_buf, sizeof (features_buf));
4196 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4199 if (ret < sizeof (features_buf))
4202 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
4204 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
4209 *snap_features = le64_to_cpu(features_buf.features);
4211 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
4212 (unsigned long long)snap_id,
4213 (unsigned long long)*snap_features,
4214 (unsigned long long)le64_to_cpu(features_buf.incompat));
4219 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
4221 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
4222 &rbd_dev->header.features);
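/*
 * The incompat handling above, reduced to its mask arithmetic
 * (stand-alone sketch; the bit values below are illustrative, not the
 * driver's feature definitions): only bits that are both flagged
 * incompatible by the image and absent from our supported mask make
 * the image unusable.
 */
#include <stdint.h>

static uint64_t sketch_unsupported(uint64_t incompat, uint64_t supported)
{
	return incompat & ~supported;
}

/*
 * e.g. with supported == 0x3, an image advertising incompat == 0x23
 * leaves 0x20 unsupported and is refused; incompat == 0x3 is fine.
 */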
4225 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
4227 struct rbd_spec *parent_spec;
4229 void *reply_buf = NULL;
4239 parent_spec = rbd_spec_alloc();
4243 size = sizeof (__le64) + /* pool_id */
4244 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
4245 sizeof (__le64) + /* snap_id */
4246 sizeof (__le64); /* overlap */
4247 reply_buf = kmalloc(size, GFP_KERNEL);
4253 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
4254 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4255 "rbd", "get_parent",
4256 &snapid, sizeof (snapid),
4258 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4263 end = reply_buf + ret;
4265 ceph_decode_64_safe(&p, end, pool_id, out_err);
4266 if (pool_id == CEPH_NOPOOL) {
4268 * Either the parent never existed, or we have
4269 * record of it but the image got flattened so it no
4270 * longer has a parent. When the parent of a
4271 * layered image disappears we immediately set the
4272 * overlap to 0. The effect of this is that all new
4273 * requests will be treated as if the image had no
4274 * parent.
4276 if (rbd_dev->parent_overlap) {
4277 rbd_dev->parent_overlap = 0;
4278 rbd_dev_parent_put(rbd_dev);
4279 pr_info("%s: clone image has been flattened\n",
4280 rbd_dev->disk->disk_name);
4283 goto out; /* No parent? No problem. */
4286 /* The ceph file layout needs to fit pool id in 32 bits */
4289 if (pool_id > (u64)U32_MAX) {
4290 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
4291 (unsigned long long)pool_id, U32_MAX);
4295 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4296 if (IS_ERR(image_id)) {
4297 ret = PTR_ERR(image_id);
4300 ceph_decode_64_safe(&p, end, snap_id, out_err);
4301 ceph_decode_64_safe(&p, end, overlap, out_err);
4304 * The parent won't change (except when the clone is
4305 * flattened, already handled that). So we only need to
4306 * record the parent spec if we have not already done so.
4308 if (!rbd_dev->parent_spec) {
4309 parent_spec->pool_id = pool_id;
4310 parent_spec->image_id = image_id;
4311 parent_spec->snap_id = snap_id;
4312 rbd_dev->parent_spec = parent_spec;
4313 parent_spec = NULL; /* rbd_dev now owns this */
4319 * We always update the parent overlap. If it's zero we issue
4320 * a warning, as we will proceed as if there was no parent.
4324 /* refresh, careful to warn just once */
4325 if (rbd_dev->parent_overlap)
4326 rbd_warn(rbd_dev,
4327 "clone now standalone (overlap became 0)");
4330 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
4333 rbd_dev->parent_overlap = overlap;
4339 rbd_spec_put(parent_spec);
4344 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
4348 __le64 stripe_count;
4349 } __attribute__ ((packed)) striping_info_buf = { 0 };
4350 size_t size = sizeof (striping_info_buf);
4357 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4358 "rbd", "get_stripe_unit_count", NULL, 0,
4359 (char *)&striping_info_buf, size);
4360 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4367 * We don't actually support the "fancy striping" feature
4368 * (STRIPINGV2) yet, but if the striping sizes are the
4369 * defaults the behavior is the same as before. So find
4370 * out, and only fail if the image has non-default values.
4373 obj_size = (u64)1 << rbd_dev->header.obj_order;
4374 p = &striping_info_buf;
4375 stripe_unit = ceph_decode_64(&p);
4376 if (stripe_unit != obj_size) {
4377 rbd_warn(rbd_dev, "unsupported stripe unit "
4378 "(got %llu want %llu)",
4379 stripe_unit, obj_size);
4382 stripe_count = ceph_decode_64(&p);
4383 if (stripe_count != 1) {
4384 rbd_warn(rbd_dev, "unsupported stripe count "
4385 "(got %llu want 1)", stripe_count);
4388 rbd_dev->header.stripe_unit = stripe_unit;
4389 rbd_dev->header.stripe_count = stripe_count;
4394 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
4396 size_t image_id_size;
4401 void *reply_buf = NULL;
4403 char *image_name = NULL;
4406 rbd_assert(!rbd_dev->spec->image_name);
4408 len = strlen(rbd_dev->spec->image_id);
4409 image_id_size = sizeof (__le32) + len;
4410 image_id = kmalloc(image_id_size, GFP_KERNEL);
4415 end = image_id + image_id_size;
4416 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
4418 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
4419 reply_buf = kmalloc(size, GFP_KERNEL);
4423 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
4424 "rbd", "dir_get_name",
4425 image_id, image_id_size,
4430 end = reply_buf + ret;
4432 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
4433 if (IS_ERR(image_name))
4436 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
4444 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4446 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4447 const char *snap_name;
4450 /* Skip over names until we find the one we are looking for */
4452 snap_name = rbd_dev->header.snap_names;
4453 while (which < snapc->num_snaps) {
4454 if (!strcmp(name, snap_name))
4455 return snapc->snaps[which];
4456 snap_name += strlen(snap_name) + 1;
4457 which++;
4460 return CEPH_NOSNAP;
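/*
 * The v1 snapshot names live in one contiguous block of NUL-terminated
 * strings, indexed in lockstep with snapc->snaps[]. A stand-alone walk
 * of such a table (sketch, local names):
 */
#include <string.h>

/* Return the index of @name among @count packed strings, or @count. */
static unsigned int sketch_name_index(const char *names, unsigned int count,
				      const char *name)
{
	unsigned int which;

	for (which = 0; which < count; which++) {
		if (!strcmp(name, names))
			return which;
		names += strlen(names) + 1;	/* hop over the trailing NUL */
	}
	return count;
}

/* e.g. sketch_name_index("a\0bb\0ccc", 3, "bb") == 1. */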
4462 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4464 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4469 for (which = 0; !found && which < snapc->num_snaps; which++) {
4470 const char *snap_name;
4472 snap_id = snapc->snaps[which];
4473 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4474 if (IS_ERR(snap_name)) {
4475 /* ignore no-longer existing snapshots */
4476 if (PTR_ERR(snap_name) == -ENOENT)
4481 found = !strcmp(name, snap_name);
4484 return found ? snap_id : CEPH_NOSNAP;
4488 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4489 * no snapshot by that name is found, or if an error occurs.
4491 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4493 if (rbd_dev->image_format == 1)
4494 return rbd_v1_snap_id_by_name(rbd_dev, name);
4496 return rbd_v2_snap_id_by_name(rbd_dev, name);
4500 * An image being mapped will have everything but the snap id.
4502 static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
4504 struct rbd_spec *spec = rbd_dev->spec;
4506 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
4507 rbd_assert(spec->image_id && spec->image_name);
4508 rbd_assert(spec->snap_name);
4510 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4513 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4514 if (snap_id == CEPH_NOSNAP)
4517 spec->snap_id = snap_id;
4519 spec->snap_id = CEPH_NOSNAP;
4526 * A parent image will have all ids but none of the names.
4528 * All names in an rbd spec are dynamically allocated. It's OK if we
4529 * can't figure out the name for an image id.
4531 static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
4533 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4534 struct rbd_spec *spec = rbd_dev->spec;
4535 const char *pool_name;
4536 const char *image_name;
4537 const char *snap_name;
4540 rbd_assert(spec->pool_id != CEPH_NOPOOL);
4541 rbd_assert(spec->image_id);
4542 rbd_assert(spec->snap_id != CEPH_NOSNAP);
4544 /* Get the pool name; we have to make our own copy of this */
4546 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4548 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
4551 pool_name = kstrdup(pool_name, GFP_KERNEL);
4555 /* Fetch the image name; tolerate failure here */
4557 image_name = rbd_dev_image_name(rbd_dev);
4559 rbd_warn(rbd_dev, "unable to get image name");
4561 /* Fetch the snapshot name */
4563 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
4564 if (IS_ERR(snap_name)) {
4565 ret = PTR_ERR(snap_name);
4569 spec->pool_name = pool_name;
4570 spec->image_name = image_name;
4571 spec->snap_name = snap_name;
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
{
	size_t size;
	void *reply_buf, *p, *end;
	u64 seq;
	u32 snap_count, i;
	struct ceph_snap_context *snapc;
	int ret;

	/*
	 * We'll need room for the seq value (maximum snapshot id),
	 * snapshot count, and array of that many snapshot ids.
	 * For now we have a fixed upper limit on the number we're
	 * prepared to receive.
	 */
	size = sizeof (__le64) + sizeof (__le32) +
			RBD_MAX_SNAP_COUNT * sizeof (__le64);
	reply_buf = kzalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapcontext", NULL, 0,
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, seq, out);
	ceph_decode_32_safe(&p, end, snap_count, out);

	/*
	 * Make sure the reported number of snapshot ids wouldn't go
	 * beyond the end of our buffer.  But before checking that,
	 * make sure the computed size of the snapshot context we
	 * allocate is representable in a size_t.
	 */
	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
				/ sizeof (u64)) {
		ret = -EINVAL;
		goto out;
	}
	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
		goto out;
	ret = 0;

	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc) {
		ret = -ENOMEM;
		goto out;
	}
	snapc->seq = seq;
	for (i = 0; i < snap_count; i++)
		snapc->snaps[i] = ceph_decode_64(&p);

	ceph_put_snap_context(rbd_dev->header.snapc);
	rbd_dev->header.snapc = snapc;

	dout("  snap context seq = %llu, snap_count = %u\n",
		(unsigned long long)seq, (unsigned int)snap_count);
out:
	kfree(reply_buf);
	return ret;
}
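/*
 * For reference, a sketch of the get_snapcontext reply decoded above,
 * inferred from the ceph_decode calls rather than taken from a wire
 * spec (all fields little-endian):
 *
 *	__le64 seq;			maximum snapshot id
 *	__le32 snap_count;		number of ids that follow
 *	__le64 snaps[snap_count];	the snapshot ids
 */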
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	size_t size;
	void *reply_buf, *p, *end;
	__le64 snapid;
	char *snap_name;
	int ret;

	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return ERR_PTR(-ENOMEM);

	snapid = cpu_to_le64(snap_id);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapshot_name",
				&snapid, sizeof (snapid),
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0) {
		snap_name = ERR_PTR(ret);
		goto out;
	}

	p = reply_buf;
	end = reply_buf + ret;
	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(snap_name))
		goto out;

	dout("  snap_id 0x%016llx snap_name = %s\n",
		(unsigned long long)snap_id, snap_name);
out:
	kfree(reply_buf);
	return snap_name;
}
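/*
 * The reply parsed above is a ceph "encoded string": a __le32 length
 * followed by that many bytes of name, with no NUL terminator on the
 * wire.  ceph_extract_encoded_string() copies it into a freshly
 * allocated, NUL-terminated buffer, which is why callers such as
 * rbd_v2_snap_id_by_name() must kfree() the result.
 */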
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
{
	bool first_time = rbd_dev->header.object_prefix == NULL;
	int ret;

	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
		return ret;

	if (first_time) {
		ret = rbd_dev_v2_header_onetime(rbd_dev);
		if (ret)
			return ret;
	}

	ret = rbd_dev_v2_snap_context(rbd_dev);
	if (ret && first_time) {
		kfree(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	}
	return ret;
}
static int rbd_dev_header_info(struct rbd_device *rbd_dev)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_header_info(rbd_dev);

	return rbd_dev_v2_header_info(rbd_dev);
}
/*
 * Get a unique rbd identifier for the given new rbd_dev, and add
 * the rbd_dev to the global list.
 */
static int rbd_dev_id_get(struct rbd_device *rbd_dev)
{
	int new_dev_id;

	new_dev_id = ida_simple_get(&rbd_dev_id_ida,
				    0, minor_to_rbd_dev_id(1 << MINORBITS),
				    GFP_KERNEL);
	if (new_dev_id < 0)
		return new_dev_id;

	rbd_dev->dev_id = new_dev_id;

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);

	dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);
	return 0;
}
/*
 * Remove an rbd_dev from the global list, and record that its
 * identifier is no longer in use.
 */
static void rbd_dev_id_put(struct rbd_device *rbd_dev)
{
	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);
	spin_unlock(&rbd_dev_list_lock);

	ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);

	dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
}
/*
 * Skips over white space at *buf, and updates *buf to point to the
 * first found non-space character (if any).  Returns the length of
 * the token (string of non-white space characters) found.  Note
 * that *buf must be terminated with '\0'.
 */
static inline size_t next_token(const char **buf)
{
	/*
	 * These are the characters that produce nonzero for
	 * isspace() in the "C" and "POSIX" locales.
	 */
	const char *spaces = " \f\n\r\t\v";

	*buf += strspn(*buf, spaces);	/* Find start of token */

	return strcspn(*buf, spaces);	/* Return token length */
}
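/*
 * Example (input is hypothetical): with *buf pointing at "  pool img",
 * next_token() advances *buf past the leading spaces to "pool img" and
 * returns 4, the length of "pool"; the caller is expected to consume
 * those 4 bytes (as dup_token() below does) before calling again.
 */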
/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}
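/*
 * Usage sketch (names are illustrative only):
 *
 *	const char *p = "mypool myimage";
 *	char *pool = dup_token(&p, NULL);   // "mypool", p -> " myimage"
 *	char *image = dup_token(&p, NULL);  // "myimage", p -> ""
 *	...
 *	kfree(image);
 *	kfree(pool);
 */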
/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * storage:
 *  ceph_opts
 *      The address of a pointer that will refer to a ceph options
 *      structure.  Caller must release the returned pointer using
 *      ceph_destroy_options() when it is no longer needed.
 *  opts
 *      Address of an rbd options pointer.  Fully initialized by
 *      this function; caller must release with kfree().
 *  rbd_spec
 *      Address of an rbd image specification pointer.  Fully
 *      initialized by this function based on parsed options.
 *      Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
 * where:
 *  <mon_addrs>
 *      A comma-separated list of one or more monitor addresses.
 *      A monitor address is an ip address, optionally followed
 *      by a port number (separated by a colon).
 *        I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *      A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *      The name of the rados pool containing the rbd image.
 *  <image_name>
 *      The name of the image in that pool to map.
 *  <snap_id>
 *      An optional snapshot id.  If provided, the mapping will
 *      present data from the image at the time that snapshot was
 *      created.  The image head is used if no snapshot id is
 *      provided.  Snapshot mappings are always read-only.
 */
static int rbd_add_parse_args(const char *buf,
				struct ceph_options **ceph_opts,
				struct rbd_options **opts,
				struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	char *snap_name;
	size_t mon_addrs_size;
	struct rbd_spec *spec = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct ceph_options *copts;
	int ret;

	/* The first four tokens are required */
	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len + 1;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	spec = rbd_spec_alloc();
	if (!spec)
		goto out_mem;

	spec->pool_name = dup_token(&buf, NULL);
	if (!spec->pool_name)
		goto out_mem;
	if (!*spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	spec->image_name = dup_token(&buf, NULL);
	if (!spec->image_name)
		goto out_mem;
	if (!*spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!snap_name)
		goto out_mem;
	*(snap_name + len) = '\0';
	spec->snap_name = snap_name;

	/* Initialize all rbd options to the defaults */
	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;
	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
	rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;

	copts = ceph_parse_options(options, mon_addrs,
					mon_addrs + mon_addrs_size - 1,
					parse_rbd_opts_token, rbd_opts);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = rbd_opts;
	*rbd_spec = spec;
	return 0;

out_mem:
	ret = -ENOMEM;
out_err:
	kfree(rbd_opts);
	rbd_spec_put(spec);
	kfree(options);
	return ret;
}
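/*
 * Example "add" string in the form parsed above (monitor address, key
 * and names are all made up):
 *
 *	$ echo "1.2.3.4:6789 name=admin,secret=<key> mypool myimage" \
 *		> /sys/bus/rbd/add
 *
 * maps the head of mypool/myimage; a fifth token (e.g. "... myimage
 * mysnap") would map that snapshot, always read-only.
 */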
/*
 * Return pool id (>= 0) or a negative error code.
 */
static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
{
	struct ceph_options *opts = rbdc->client->options;
	u64 newest_epoch;
	int tries = 0;
	int ret;

again:
	ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
	if (ret == -ENOENT && tries++ < 1) {
		ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
					       &newest_epoch);
		if (ret < 0)
			return ret;

		if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
			ceph_monc_request_next_osdmap(&rbdc->client->monc);
			(void) ceph_monc_wait_osdmap(&rbdc->client->monc,
						     newest_epoch,
						     opts->mount_timeout);
			goto again;
		} else {
			/* the osdmap we have is new enough */
			return -ENOENT;
		}
	}
	return ret;
}
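/*
 * Note the deliberately bounded retry above: a failed name lookup is
 * repeated at most once, against an osdmap at least as new as the
 * monitors' newest epoch (the pool may have been created after our
 * cached map was fetched); a second -ENOENT is then taken at face
 * value.
 */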
/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */
	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */
	ret = rbd_obj_method_sync(rbd_dev, object_name,
				"rbd", "get_id", NULL, 0,
				response, RBD_IMAGE_ID_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret >= 0) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						NULL, GFP_NOIO);
		ret = PTR_ERR_OR_ZERO(image_id);
		if (!ret)
			rbd_dev->image_format = 2;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	kfree(object_name);
	return ret;
}
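/*
 * For example (image name is hypothetical), probing "myimage" reads
 * the object named RBD_ID_PREFIX "myimage" -- "rbd_id.myimage" with
 * the prefix value defined in rbd_types.h, which is authoritative if
 * it ever differs from this comment.
 */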
/*
 * Undo whatever state changes are made by v1 or v2 header info
 * routines.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header *header;

	rbd_dev_parent_put(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		return ret;

	/*
	 * Get and check the features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */
	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}
	/* No support for crypto and compression type format 2 images */

	return 0;

out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;
	return ret;
}
/*
 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
 * rbd_dev_image_probe() recursion depth, which means it's also the
 * length of the already discovered part of the parent chain.
 */
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
{
	struct rbd_device *parent = NULL;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;

	if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
		pr_info("parent chain is too long (%d)\n", depth);
		ret = -EINVAL;
		goto out_err;
	}

	parent = rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec,
				NULL);
	if (!parent) {
		ret = -ENOMEM;
		goto out_err;
	}

	/*
	 * Images related by parent/child relationships always share
	 * rbd_client and spec/parent_spec, so bump their refcounts.
	 */
	__rbd_get_client(rbd_dev->rbd_client);
	rbd_spec_get(rbd_dev->parent_spec);

	ret = rbd_dev_image_probe(parent, depth);
	if (ret < 0)
		goto out_err;

	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);
	return 0;

out_err:
	rbd_dev_unparent(rbd_dev);
	rbd_dev_destroy(parent);
	return ret;
}
/*
 * rbd_dev->header_rwsem must be locked for write and will be unlocked
 * upon return.
 */
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* Get an id and fill in device name. */
	ret = rbd_dev_id_get(rbd_dev);
	if (ret)
		goto err_out_unlock;

	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Record our major and minor device numbers. */
	if (!single_major) {
		ret = register_blkdev(0, rbd_dev->name);
		if (ret < 0)
			goto err_out_id;
		rbd_dev->major = ret;
		rbd_dev->minor = 0;
	} else {
		rbd_dev->major = rbd_major;
		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
	}

	/* Set up the blkdev mapping. */
	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;

	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
	set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);

	dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
	ret = device_add(&rbd_dev->dev);
	if (ret)
		goto err_out_mapping;

	/* Everything's ready.  Announce the disk to the world. */
	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	up_write(&rbd_dev->header_rwsem);

	add_disk(rbd_dev->disk);
	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long)rbd_dev->mapping.size);
	return ret;

err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
err_out_unlock:
	up_write(&rbd_dev->header_rwsem);
	return ret;
}
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	size_t size;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
	else
		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;

	if (rbd_dev->image_format == 1)
		sprintf(rbd_dev->header_name, "%s%s",
			spec->image_name, RBD_SUFFIX);
	else
		sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, spec->image_id);
	return 0;
}
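/*
 * Using made-up names: a format 1 image "foo" keeps its header in
 * object "foo" RBD_SUFFIX, while a format 2 image with id "abc123"
 * uses RBD_HEADER_PREFIX "abc123".  The macro values (".rbd" and
 * "rbd_header." respectively) live in rbd_types.h, which is
 * authoritative if they ever change.
 */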
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}
/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
{
	int ret;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (!depth) {
		ret = rbd_dev_header_watch_sync(rbd_dev);
		if (ret) {
			if (ret == -ENOENT)
				pr_info("image %s/%s does not exist\n",
					rbd_dev->spec->pool_name,
					rbd_dev->spec->image_name);
			goto out_header_name;
		}
	}

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	/*
	 * If this image is the one being mapped, we have pool name and
	 * id, image name and id, and snap name - need to fill snap id.
	 * Otherwise this is a parent image, identified by pool, image
	 * and snap ids - need to fill in names for those ids.
	 */
	if (!depth)
		ret = rbd_spec_fill_snap_id(rbd_dev);
	else
		ret = rbd_spec_fill_names(rbd_dev);
	if (ret) {
		if (ret == -ENOENT)
			pr_info("snap %s/%s@%s does not exist\n",
				rbd_dev->spec->pool_name,
				rbd_dev->spec->image_name,
				rbd_dev->spec->snap_name);
		goto err_out_probe;
	}

	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto err_out_probe;
		/*
		 * Need to warn users if this image is the one being
		 * mapped and has a parent.
		 */
		if (!depth && rbd_dev->parent_spec)
			rbd_warn(rbd_dev,
				"WARNING: kernel layering is EXPERIMENTAL!");
	}

	ret = rbd_dev_probe_parent(rbd_dev, depth);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_name);
	return 0;

err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (!depth)
		rbd_dev_header_unwatch_sync(rbd_dev);
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;
	return ret;
}
static ssize_t do_rbd_add(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	bool read_only;
	int rc;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto out;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
	if (rc < 0) {
		if (rc == -ENOENT)
			pr_info("pool %s does not exist\n", spec->pool_name);
		goto err_out_client;
	}
	spec->pool_id = (u64)rc;

	/* The ceph file layout needs to fit pool id in 32 bits */
	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)",
			(unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
	if (!rbd_dev) {
		rc = -ENOMEM;
		goto err_out_client;
	}
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */
	rbd_opts = NULL;	/* rbd_dev now owns this */

	down_write(&rbd_dev->header_rwsem);
	rc = rbd_dev_image_probe(rbd_dev, 0);
	if (rc < 0)
		goto err_out_rbd_dev;

	/* If we are mapping a snapshot it must be marked read-only */
	read_only = rbd_dev->opts->read_only;
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		read_only = true;
	rbd_dev->mapping.read_only = read_only;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc) {
		/*
		 * rbd_dev_header_unwatch_sync() can't be moved into
		 * rbd_dev_image_release() without refactoring, see
		 * commit 1f3ef78861ac.
		 */
		rbd_dev_header_unwatch_sync(rbd_dev);
		rbd_dev_image_release(rbd_dev);
		goto out;
	}

	rc = count;
out:
	module_put(THIS_MODULE);
	return rc;

err_out_rbd_dev:
	up_write(&rbd_dev->header_rwsem);
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
	kfree(rbd_opts);
	goto out;
}
static ssize_t rbd_add(struct bus_type *bus, const char *buf, size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_add(bus, buf, count);
}

static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count)
{
	return do_rbd_add(bus, buf, count);
}
static void rbd_dev_device_release(struct rbd_device *rbd_dev)
{
	rbd_free_disk(rbd_dev);
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	device_del(&rbd_dev->dev);
	rbd_dev_mapping_clear(rbd_dev);
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
	rbd_dev_id_put(rbd_dev);
}
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;
		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}
static ssize_t do_rbd_remove(struct bus_type *bus,
			     const char *buf,
			     size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	unsigned long ul;
	bool already = false;
	int ret;

	ret = kstrtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	dev_id = (int)ul;
	if (dev_id != ul)
		return -EINVAL;

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count)
			ret = -EBUSY;
		else
			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
							&rbd_dev->flags);
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret < 0 || already)
		return ret;

	rbd_dev_header_unwatch_sync(rbd_dev);

	/*
	 * Don't free anything from rbd_dev->disk until after all
	 * notifies are completely processed.  Otherwise
	 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
	 * in a potential use after free of rbd_dev->disk or rbd_dev.
	 */
	rbd_dev_device_release(rbd_dev);
	rbd_dev_image_release(rbd_dev);

	return count;
}
static ssize_t rbd_remove(struct bus_type *bus, const char *buf, size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_remove(bus, buf, count);
}

static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count)
{
	return do_rbd_remove(bus, buf, count);
}
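/*
 * Example (device id is illustrative): unmapping device "rbd0" by
 * writing the decimal id parsed by do_rbd_remove():
 *
 *	$ echo 0 > /sys/bus/rbd/remove
 */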
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}
static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
	if (!rbd_obj_request_cache)
		goto out_err;

	rbd_assert(!rbd_segment_name_cache);
	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
					CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
	if (rbd_segment_name_cache)
		return 0;
out_err:
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;

	return -ENOMEM;
}
static void rbd_slab_exit(void)
{
	rbd_assert(rbd_segment_name_cache);
	kmem_cache_destroy(rbd_segment_name_cache);
	rbd_segment_name_cache = NULL;

	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}
static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}

	rc = rbd_slab_init();
	if (rc)
		return rc;

	/*
	 * The number of active work items is limited by the number of
	 * rbd devices * queue depth, so leave @max_active at default.
	 */
	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
	if (!rbd_wq) {
		rc = -ENOMEM;
		goto err_out_slab;
	}

	if (single_major) {
		rbd_major = register_blkdev(0, RBD_DRV_NAME);
		if (rbd_major < 0) {
			rc = rbd_major;
			goto err_out_wq;
		}
	}

	rc = rbd_sysfs_init();
	if (rc)
		goto err_out_blkdev;

	if (single_major)
		pr_info("loaded (major %d)\n", rbd_major);
	else
		pr_info("loaded\n");

	return 0;

err_out_blkdev:
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_wq:
	destroy_workqueue(rbd_wq);
err_out_slab:
	rbd_slab_exit();
	return rc;
}
static void __exit rbd_exit(void)
{
	ida_destroy(&rbd_dev_id_ida);
	rbd_sysfs_cleanup();
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
	destroy_workqueue(rbd_wq);
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);
MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");