/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"
static DEFINE_IDA(region_ida);
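/*
 * Map the write-pending-queue (WPQ) flush hint registers advertised by
 * @nvdimm into the region's per-dimm table.  Hints that land in an
 * already-mapped page reuse that mapping rather than ioremapping the
 * same page twice.
 */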
static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
		struct nd_region_data *ndrd)
{
	int i, j;

	dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
			nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
	for (i = 0; i < nvdimm->num_flush; i++) {
		struct resource *res = &nvdimm->flush_wpq[i];
		unsigned long pfn = PHYS_PFN(res->start);
		void __iomem *flush_page;

		/* check if flush hints share a page */
		for (j = 0; j < i; j++) {
			struct resource *res_j = &nvdimm->flush_wpq[j];
			unsigned long pfn_j = PHYS_PFN(res_j->start);

			if (pfn == pfn_j)
				break;
		}

		if (j < i)
			flush_page = (void __iomem *) ((unsigned long)
					ndrd->flush_wpq[dimm][j] & PAGE_MASK);
		else
			flush_page = devm_nvdimm_ioremap(dev,
					PFN_PHYS(pfn), PAGE_SIZE);
		if (!flush_page)
			return -ENXIO;
		ndrd->flush_wpq[dimm][i] = flush_page
			+ (res->start & ~PAGE_MASK);
	}

	return 0;
}
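/*
 * Size and populate the region's flush hint table.  A worked example of
 * the sizing arithmetic below (illustrative, not from the original
 * source): a region mapping two DIMMs with two flush hints each needs
 * seven pointers beyond sizeof(*ndrd) --
 *
 *	flush_data_size = sizeof(void *)		// baseline slot
 *			+ 2 * sizeof(void *)		// one NULL "no-hint" slot per DIMM
 *			+ 2 * 2 * sizeof(void *);	// the hints themselves
 */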
int nd_region_activate(struct nd_region *nd_region)
{
	int i;
	struct nd_region_data *ndrd;
	struct device *dev = &nd_region->dev;
	size_t flush_data_size = sizeof(void *);

	nvdimm_bus_lock(&nd_region->dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* at least one null hint slot per-dimm for the "no-hint" case */
		flush_data_size += sizeof(void *);
		if (!nvdimm->num_flush)
			continue;
		flush_data_size += nvdimm->num_flush * sizeof(void *);
	}
	nvdimm_bus_unlock(&nd_region->dev);

	ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
	if (!ndrd)
		return -ENOMEM;
	dev_set_drvdata(dev, ndrd);

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

		if (rc)
			return rc;
	}

	return 0;
}
static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	ida_simple_remove(&region_ida, nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}
static struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
};
bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}
struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);
void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);
/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This value is also the region's 'nstype' attribute, an input to the
 * MODALIAS for namespace devices, and the bit number for an nvdimm_bus
 * to match namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_nd_pmem(&nd_region->dev)) {
		u16 i, alias;

		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (nvdimm->flags & NDD_ALIASING)
				alias++;
		}
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);
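/*
 * The 'size' attribute reports the full region size for PMEM.  A BLK
 * region's size only has a per-DIMM interpretation, so anything other
 * than exactly one mapping reports 0.
 */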
static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_nd_pmem(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);
static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);
static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (is_nd_pmem(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	return sprintf(buf, "%#llx\n", nd_set->cookie);
}
static DEVICE_ATTR_RO(set_cookie);
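/*
 * Sum the free dimm-physical-address space across the region's mappings.
 * For PMEM the accounting must deduct capacity shadowed by BLK aliasing:
 * nd_pmem_available_dpa() reports the largest BLK overlap it encounters
 * via @overlap, and the loop restarts whenever that worst case grows so
 * every mapping is re-evaluated against a consistent blk_max_overlap.
 */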
resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_nd_pmem(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev)) {
			available += nd_blk_available_dpa(nd_mapping);
		}
	}

	return available;
}
static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size.  Of course, this value is potentially invalidated the
	 * moment the nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);
static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_data *ndrd = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (ndrd)
		rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);
static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(dax_seed);
static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);
static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	NULL,
};
static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (!is_nd_pmem(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_nd_pmem(dev) && nd_set)
		return a->mode;

	return 0;
}
struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);
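/*
 * The interleave set cookie is a provider-supplied fingerprint of the
 * DIMM configuration backing the region.  Namespace labels record it so
 * a stale label set can be detected if the DIMMs are rearranged.
 */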
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->cookie;
	return 0;
}
/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds.  Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
		struct device *dev, bool probe)
{
	struct nd_region *nd_region;

	if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) {
		int i;

		nd_region = to_nd_region(dev);
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			kfree(nd_mapping->labels);
			nd_mapping->labels = NULL;
			put_ndd(ndd);
			nd_mapping->ndd = NULL;
			if (ndd)
				atomic_dec(&nvdimm->busy);
		}

		if (is_nd_pmem(dev))
			return;
	}
	if (dev->parent && is_nd_blk(dev->parent) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->ns_seed == dev)
			nd_region_create_blk_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_btt(dev) && probe) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev &&
				is_nd_blk(dev->parent))
			nd_region_create_blk_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_pfn(dev) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_dax(dev) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
}
void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, false);
}
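/*
 * Format one 'mappingN' sysfs attribute as "<dimm>,<start>,<size>".
 * An illustrative read (values invented, not captured from hardware):
 *
 *	$ cat /sys/bus/nd/devices/region0/mapping0
 *	nmem0,0,34359738368
 */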
static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size);
}
#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)
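/*
 * For reference: REGION_MAPPING(0) expands to a mapping0_show() wrapper
 * around mappingN(dev, buf, 0) plus its DEVICE_ATTR_RO(mapping0)
 * read-only attribute definition.
 */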
/*
 * 32 should be enough for a while, even in the presence of socket
 * interleave a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);
static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;

	return 0;
}
static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);
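/*
 * Called at region probe time: a no-op for non-BLK regions, otherwise
 * validate that the BLK region has at least one mapping and hand off to
 * the provider's enable() callback.
 */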
int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!is_nd_blk(dev))
		return 0;

	if (nd_region->ndr_mappings < 1) {
		dev_err(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}
/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu.  For larger systems we need to lock to share lanes.  For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively.  We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);
void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);
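/*
 * A minimal sketch (not from the original source) of how a driver pairs
 * lane acquisition with an I/O to a BLK data window or BTT log slot:
 *
 *	lane = nd_region_acquire_lane(nd_region);
 *	... use the per-lane resource, e.g. program a data window ...
 *	nd_region_release_lane(nd_region, lane);
 *
 * Preemption stays disabled between the two calls (acquire takes the
 * cpu via get_cpu() and release's final put_cpu() balances it), so the
 * critical section must not sleep.
 */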
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if ((nd_mapping->start | nd_mapping->size) % SZ_4K) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
					caller, dev_name(&nvdimm->dev), i);
			return NULL;
		}

		if (nvdimm->flags & NDD_UNARMED)
			ro = 1;
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(sizeof(struct nd_region)
				+ sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	memcpy(nd_region->mapping, ndr_desc->nd_mapping,
			sizeof(struct nd_mapping) * ndr_desc->num_mappings);
	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_device_register(dev);

	return nd_region;

 err_percpu:
	ida_simple_remove(&region_ida, nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}
struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);
struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);
struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);
void __exit nd_region_devs_exit(void)
{
	ida_destroy(&region_ida);
}