/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

static DEFINE_IDA(region_ida);

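/*
 * nd_region_release(): device ->release callback shared by all region
 * types.  It drops the per-mapping nvdimm device references taken at
 * region creation time, returns the region id to region_ida, and frees
 * the region.
 */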
static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	ida_simple_remove(&region_ida, nd_region->id);
	kfree(nd_region);
}

static struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
};

bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region as well as an input to the
 * MODALIAS for namespace devices, and the bit number for an nvdimm_bus to
 * match namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_nd_pmem(&nd_region->dev)) {
		u16 i, alias;

		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (nvdimm->flags & NDD_ALIASING)
				alias++;
		}
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);

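/*
 * is_uuid_busy(): device_for_each_child() callback.  Compares the uuid
 * passed in via @data against the uuid of each namespace device found and
 * returns a non-zero (busy) result on a match so that the walk stops.
 */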
static int is_uuid_busy(struct device *dev, void *data)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	u8 *uuid = data;

	switch (nd_region_to_nstype(nd_region)) {
	case ND_DEVICE_NAMESPACE_PMEM: {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		if (!nspm->uuid)
			break;
		if (memcmp(uuid, nspm->uuid, NSLABEL_UUID_LEN) == 0)
			return -EBUSY;
		break;
	}
	case ND_DEVICE_NAMESPACE_BLK: {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		if (!nsblk->uuid)
			break;
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) == 0)
			return -EBUSY;
		break;
	}
	default:
		break;
	}

	return 0;
}

static int is_namespace_uuid_busy(struct device *dev, void *data)
{
	if (is_nd_pmem(dev) || is_nd_blk(dev))
		return device_for_each_child(dev, data, is_uuid_busy);
	return 0;
}

/**
 * nd_is_uuid_unique - verify that no other namespace has @uuid
 * @dev: any device on a nvdimm_bus
 * @uuid: uuid to check
 */
bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return false;
	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
	if (device_for_each_child(&nvdimm_bus->dev, uuid,
				is_namespace_uuid_busy) != 0)
		return false;
	return true;
}

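/*
 * The region 'size' attribute: for a PMEM region this is the size of the
 * region's system-physical-address range; for other region types with a
 * single DIMM mapping it is that mapping's size, otherwise 0.
 */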
static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_nd_pmem(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (is_nd_pmem(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	return sprintf(buf, "%#llx\n", nd_set->cookie);
}
static DEVICE_ATTR_RO(set_cookie);

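/*
 * nd_region_available_dpa(): sum the DIMM-physical-address capacity that
 * is still free for new namespaces across all of the region's mappings.
 * For PMEM regions the per-mapping result depends on how much BLK
 * capacity overlaps the interleave set, so when a mapping reports a
 * larger BLK overlap than previously assumed the accounting restarts
 * with the new worst-case overlap.
 */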
resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_nd_pmem(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev)) {
			available += nd_blk_available_dpa(nd_mapping);
		}
	}

	return available;
}

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size.  Of course, this value is potentially invalidated the
	 * moment the nvdimm_bus_lock() is dropped, but it is up to
	 * userspace to not race itself.
	 */
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_namespaces *num_ns = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (num_ns)
		rc = sprintf(buf, "%d/%d\n", num_ns->active, num_ns->count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	NULL,
};

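/*
 * region_visible(): sysfs ->is_visible() hook.  All attributes are shown
 * unconditionally except 'set_cookie', which is only valid for a PMEM
 * region with an interleave set, and 'available_size', which is only
 * meaningful for region types that host namespaces.
 */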
static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_nd_pmem(dev) && nd_set)
		return a->mode;

	return 0;
}

struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);

u64 nd_region_interleave_set_cookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->cookie;
	return 0;
}

/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
		struct device *dev, bool probe)
{
	struct nd_region *nd_region;

	if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) {
		int i;

		nd_region = to_nd_region(dev);
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			kfree(nd_mapping->labels);
			nd_mapping->labels = NULL;
			put_ndd(ndd);
			nd_mapping->ndd = NULL;
			atomic_dec(&nvdimm->busy);
		}
	}
	if (dev->parent && is_nd_blk(dev->parent) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->ns_seed == dev)
			nd_region_create_blk_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_btt(dev) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
}

void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, false);
}

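/*
 * mappingN(): backing implementation for the per-region 'mappingN' sysfs
 * attributes.  Each reports the mapping's dimm device name, start
 * address, and size as "<name>,<start>,<size>", or -ENXIO if the region
 * has fewer than n + 1 mappings.
 */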
static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size);
}

#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)

/*
 * 32 should be enough for a while; even in the presence of socket
 * interleave a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;

	return 0;
}

static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);

void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

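/*
 * nd_region_create(): common constructor for pmem, blk, and volatile
 * regions.  It validates that every DIMM mapping is 4K aligned, allocates
 * the region together with its trailing array of nd_mapping entries, pins
 * each backing nvdimm with get_device(), and registers the new "regionN"
 * device on the nvdimm_bus.
 */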
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	u16 i;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if ((nd_mapping->start | nd_mapping->size) % SZ_4K) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
					caller, dev_name(&nvdimm->dev), i);
			return NULL;
		}
	}

	nd_region = kzalloc(sizeof(struct nd_region)
			+ sizeof(struct nd_mapping) * ndr_desc->num_mappings,
			GFP_KERNEL);
	if (!nd_region)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0) {
		kfree(nd_region);
		return NULL;
	}

	memcpy(nd_region->mapping, ndr_desc->nd_mapping,
			sizeof(struct nd_mapping) * ndr_desc->num_mappings);
	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_device_register(dev);

	return nd_region;
}

struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

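/*
 * Minimal usage sketch (illustrative only, not part of this file): a bus
 * provider such as the ACPI NFIT driver describes a region with an
 * nd_region_desc before calling one of the region constructors in this
 * file, along the lines of:
 *
 *	struct nd_region_desc ndr_desc = { 0 };
 *
 *	ndr_desc.res = &res;			// region address range
 *	ndr_desc.nd_mapping = mappings;		// per-DIMM mapping table
 *	ndr_desc.num_mappings = num_mappings;
 *	ndr_desc.attr_groups = my_attr_groups;
 *	ndr_desc.provider_data = provider;
 *	nd_region = nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc);
 *
 * Only fields referenced by nd_region_create() above are shown; 'res',
 * 'mappings', 'num_mappings', 'my_attr_groups', and 'provider' are
 * placeholder caller variables, and the authoritative nd_region_desc
 * definition lives in include/linux/libnvdimm.h.
 */
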
struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);