/*
 * drivers/nvdimm/dimm_devs.c
 *
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "nd.h"

static DEFINE_IDA(dimm_ida);

/*
 * Check whether the bus supports reading this dimm's config data area:
 * dimms that alias with pmem require ND_CMD_GET_CONFIG_DATA (-ENXIO on
 * failure); for others the command is simply unsupported (-ENOTTY).
 */
int nvdimm_check_config_data(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        if (!nvdimm->cmd_mask ||
            !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
                if (nvdimm->flags & NDD_ALIASING)
                        return -ENXIO;
                else
                        return -ENOTTY;
        }

        return 0;
}

static int validate_dimm(struct nvdimm_drvdata *ndd)
{
        int rc;

        if (!ndd)
                return -EINVAL;

        rc = nvdimm_check_config_data(ndd->dev);
        if (rc)
                dev_dbg(ndd->dev, "%pf: %s error: %d\n",
                                __builtin_return_address(0), __func__, rc);
        return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm driver-data, the handle used to issue config commands
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
        struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nvdimm_bus_descriptor *nd_desc;
        int rc = validate_dimm(ndd);

        if (rc)
                return rc;

        if (cmd->config_size)
                return 0; /* already valid */

        memset(cmd, 0, sizeof(*cmd));
        nd_desc = nvdimm_bus->nd_desc;
        return nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                        ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), NULL);
}

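/*
 * For reference, a bus provider's ->ndctl() callback is expected to
 * fill in the nd_cmd_get_config_size payload on success. A minimal
 * sketch (illustrative only; the sizes below are assumptions, not
 * values required by this interface):
 *
 *	static int example_ndctl(struct nvdimm_bus_descriptor *nd_desc,
 *			struct nvdimm *nvdimm, unsigned int cmd, void *buf,
 *			unsigned int buf_len, int *cmd_rc)
 *	{
 *		if (cmd == ND_CMD_GET_CONFIG_SIZE) {
 *			struct nd_cmd_get_config_size *size = buf;
 *
 *			size->status = 0;
 *			size->config_size = SZ_128K; // label area bytes
 *			size->max_xfer = SZ_4K;      // per-command limit
 *			return 0;
 *		}
 *		return -ENOTTY;
 *	}
 */
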
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nd_cmd_get_config_data_hdr *cmd;
        struct nvdimm_bus_descriptor *nd_desc;
        int rc = validate_dimm(ndd);
        u32 max_cmd_size, config_size;
        size_t offset;

        if (rc)
                return rc;

        if (ndd->data)
                return 0;

        if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0
                        || ndd->nsarea.config_size < ND_LABEL_MIN_SIZE) {
                dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n",
                                ndd->nsarea.max_xfer, ndd->nsarea.config_size);
                return -ENXIO;
        }

        ndd->data = kmalloc(ndd->nsarea.config_size, GFP_KERNEL);
        if (!ndd->data)
                ndd->data = vmalloc(ndd->nsarea.config_size);

        if (!ndd->data)
                return -ENOMEM;

        /* read the label area max_xfer (at most PAGE_SIZE) bytes at a time */
        max_cmd_size = min_t(u32, PAGE_SIZE, ndd->nsarea.max_xfer);
        cmd = kzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        nd_desc = nvdimm_bus->nd_desc;
        for (config_size = ndd->nsarea.config_size, offset = 0;
                        config_size; config_size -= cmd->in_length,
                        offset += cmd->in_length) {
                cmd->in_length = min(config_size, max_cmd_size);
                cmd->in_offset = offset;
                rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                                ND_CMD_GET_CONFIG_DATA, cmd,
                                cmd->in_length + sizeof(*cmd), NULL);
                if (rc || cmd->status) {
                        rc = -ENXIO;
                        break;
                }
                memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
        }
        dev_dbg(ndd->dev, "%s: len: %zu rc: %d\n", __func__, offset, rc);
        kfree(cmd);

        return rc;
}

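/*
 * Layout of each ND_CMD_GET_CONFIG_DATA transfer above, per
 * include/uapi/linux/ndctl.h: a fixed header followed by the requested
 * window of label data, which is why each call is sized as
 * cmd->in_length + sizeof(*cmd):
 *
 *	struct nd_cmd_get_config_data_hdr {
 *		__u32 in_offset;   // label-area offset to read
 *		__u32 in_length;   // bytes to read
 *		__u32 status;      // provider-filled completion status
 *		__u8 out_buf[0];   // in_length bytes of label data
 *	};
 */
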
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
                void *buf, size_t len)
{
        int rc = validate_dimm(ndd);
        size_t max_cmd_size, buf_offset;
        struct nd_cmd_set_config_hdr *cmd;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

        if (rc)
                return rc;

        if (!ndd->data)
                return -ENXIO;

        if (offset + len > ndd->nsarea.config_size)
                return -ENXIO;

        max_cmd_size = min_t(u32, PAGE_SIZE, len);
        max_cmd_size = min_t(u32, max_cmd_size, ndd->nsarea.max_xfer);
        cmd = kzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        for (buf_offset = 0; len; len -= cmd->in_length,
                        buf_offset += cmd->in_length) {
                size_t cmd_size;
                u32 *status;

                cmd->in_offset = offset + buf_offset;
                cmd->in_length = min(max_cmd_size, len);
                memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

                /* status is output in the last 4 bytes of the command buffer */
                cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);
                status = ((void *) cmd) + cmd_size - sizeof(u32);

                rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                                ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, NULL);
                if (rc || *status) {
                        rc = rc ? rc : -ENXIO;
                        break;
                }
        }
        kfree(cmd);

        return rc;
}

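/*
 * Buffer layout for each ND_CMD_SET_CONFIG_DATA transfer above: unlike
 * the get path, the status word is not part of the fixed header; it
 * trails the payload in the last four bytes of the command buffer:
 *
 *	+-----------+-----------+-------------------+--------+
 *	| in_offset | in_length | in_buf[in_length] | status |
 *	+-----------+-----------+-------------------+--------+
 *
 * hence cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32).
 */
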
static void nvdimm_release(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        ida_simple_remove(&dimm_ida, nvdimm->id);
        kfree(nvdimm);
}

static struct device_type nvdimm_device_type = {
        .name = "nvdimm",
        .release = nvdimm_release,
};

bool is_nvdimm(struct device *dev)
{
        return dev->type == &nvdimm_device_type;
}

struct nvdimm *to_nvdimm(struct device *dev)
{
        struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

        WARN_ON(!is_nvdimm(dev));
        return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
{
        struct nd_region *nd_region = &ndbr->nd_region;
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];

        return nd_mapping->nvdimm;
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
        struct nvdimm *nvdimm = nd_mapping->nvdimm;

        WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

        return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

void nvdimm_drvdata_release(struct kref *kref)
{
        struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
        struct device *dev = ndd->dev;
        struct resource *res, *_r;

        dev_dbg(dev, "%s\n", __func__);

        nvdimm_bus_lock(dev);
        for_each_dpa_resource_safe(ndd, res, _r)
                nvdimm_free_dpa(ndd, res);
        nvdimm_bus_unlock(dev);

        kvfree(ndd->data);
        kfree(ndd);
        put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
        kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
        if (ndd)
                kref_put(&ndd->kref, nvdimm_drvdata_release);
}

const char *nvdimm_name(struct nvdimm *nvdimm)
{
        return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
        return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
        return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
        if (nvdimm)
                return nvdimm->provider_data;
        return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

static ssize_t commands_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);
        int cmd, len = 0;

        if (!nvdimm->cmd_mask)
                return sprintf(buf, "\n");

        for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
                len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
        len += sprintf(buf + len, "\n");
        return len;
}
static DEVICE_ATTR_RO(commands);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
                char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        /*
         * The state may be in the process of changing; userspace should
         * quiesce probing if it wants a static answer.
         */
        nvdimm_bus_lock(dev);
        nvdimm_bus_unlock(dev);
        return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
                        ? "active" : "idle");
}
static DEVICE_ATTR_RO(state);

static ssize_t available_slots_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
        ssize_t rc;
        u32 nfree;

        if (!ndd)
                return -ENXIO;

        nvdimm_bus_lock(dev);
        nfree = nd_label_nfree(ndd);
        /* reserve one label slot; guard against unsigned underflow */
        if (nfree - 1 > nfree) {
                dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
                nfree = 0;
        } else
                nfree--;
        rc = sprintf(buf, "%d\n", nfree);
        nvdimm_bus_unlock(dev);
        return rc;
}
static DEVICE_ATTR_RO(available_slots);

static struct attribute *nvdimm_attributes[] = {
        &dev_attr_state.attr,
        &dev_attr_commands.attr,
        &dev_attr_available_slots.attr,
        NULL,
};

struct attribute_group nvdimm_attribute_group = {
        .attrs = nvdimm_attributes,
};
EXPORT_SYMBOL_GPL(nvdimm_attribute_group);

struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
                const struct attribute_group **groups, unsigned long flags,
                unsigned long cmd_mask, int num_flush,
                struct resource *flush_wpq)
{
        struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
        struct device *dev;

        if (!nvdimm)
                return NULL;

        nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
        if (nvdimm->id < 0) {
                kfree(nvdimm);
                return NULL;
        }
        nvdimm->provider_data = provider_data;
        nvdimm->flags = flags;
        nvdimm->cmd_mask = cmd_mask;
        nvdimm->num_flush = num_flush;
        nvdimm->flush_wpq = flush_wpq;
        atomic_set(&nvdimm->busy, 0);
        dev = &nvdimm->dev;
        dev_set_name(dev, "nmem%d", nvdimm->id);
        dev->parent = &nvdimm_bus->dev;
        dev->type = &nvdimm_device_type;
        dev->devt = MKDEV(nvdimm_major, nvdimm->id);
        dev->groups = groups;
        nd_device_register(dev);

        return nvdimm;
}
EXPORT_SYMBOL_GPL(nvdimm_create);

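/*
 * A minimal registration sketch for a bus provider (illustrative only;
 * the command set and the absence of flush hints below are assumptions,
 * not requirements of nvdimm_create()):
 *
 *	unsigned long cmd_mask = 0;
 *	struct nvdimm *nvdimm;
 *
 *	set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
 *	set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
 *	set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
 *	nvdimm = nvdimm_create(nvdimm_bus, provider_handle, NULL,
 *			0, cmd_mask, 0, NULL);
 *	if (!nvdimm)
 *		return -ENOMEM;
 */
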
int alias_dpa_busy(struct device *dev, void *data)
{
        resource_size_t map_end, blk_start, new, busy;
        struct blk_alloc_info *info = data;
        struct nd_mapping *nd_mapping;
        struct nd_region *nd_region;
        struct nvdimm_drvdata *ndd;
        struct resource *res;
        int i;

        if (!is_nd_pmem(dev))
                return 0;

        nd_region = to_nd_region(dev);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                nd_mapping = &nd_region->mapping[i];
                if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
                        break;
        }

        if (i >= nd_region->ndr_mappings)
                return 0;

        ndd = to_ndd(nd_mapping);
        map_end = nd_mapping->start + nd_mapping->size - 1;
        blk_start = nd_mapping->start;

        /*
         * In the allocation case ->res is set to free space that we are
         * looking to validate against PMEM aliasing collision rules
         * (i.e. BLK is allocated after all aliased PMEM).
         */
        if (info->res) {
                if (info->res->start >= nd_mapping->start
                                && info->res->start < map_end)
                        /* pass */;
                else
                        return 0;
        }

 retry:
        /*
         * Find the free dpa from the end of the last pmem allocation to
         * the end of the interleave-set mapping that is not already
         * covered by a blk allocation.
         */
        busy = 0;
        for_each_dpa_resource(ndd, res) {
                if ((res->start >= blk_start && res->start < map_end)
                                || (res->end >= blk_start
                                        && res->end <= map_end)) {
                        if (strncmp(res->name, "pmem", 4) == 0) {
                                new = max(blk_start, min(map_end + 1,
                                                        res->end + 1));
                                if (new != blk_start) {
                                        blk_start = new;
                                        goto retry;
                                }
                        } else
                                busy += min(map_end, res->end)
                                        - max(nd_mapping->start, res->start) + 1;
                } else if (nd_mapping->start > res->start
                                && map_end < res->end) {
                        /* total eclipse of the PMEM region mapping */
                        busy += nd_mapping->size;
                        break;
                }
        }

        /* update the free space range with the probed blk_start */
        if (info->res && blk_start > info->res->start) {
                info->res->start = max(info->res->start, blk_start);
                if (info->res->start > info->res->end)
                        info->res->end = info->res->start - 1;
                return 1;
        }

        info->available -= blk_start - nd_mapping->start + busy;

        return 0;
}

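/*
 * Worked example of the rule enforced above (numbers are illustrative):
 * given a mapping spanning dpa 0x0000-0xffff with a "pmem" allocation
 * at 0x0000-0x3fff, blk_start is pushed to 0x4000 on the first pass,
 * so only 0x4000-0xffff remains countable as BLK-available space.
 */
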
static int blk_dpa_busy(struct device *dev, void *data)
{
        struct blk_alloc_info *info = data;
        struct nd_mapping *nd_mapping;
        struct nd_region *nd_region;
        resource_size_t map_end;
        int i;

        if (!is_nd_pmem(dev))
                return 0;

        nd_region = to_nd_region(dev);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                nd_mapping = &nd_region->mapping[i];
                if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
                        break;
        }

        if (i >= nd_region->ndr_mappings)
                return 0;

        map_end = nd_mapping->start + nd_mapping->size - 1;
        if (info->res->start >= nd_mapping->start
                        && info->res->start < map_end) {
                if (info->res->end <= map_end) {
                        info->busy = 0;
                        return 1;
                } else {
                        info->busy -= info->res->end - map_end;
                        return 0;
                }
        } else if (info->res->end >= nd_mapping->start
                        && info->res->end <= map_end) {
                info->busy -= nd_mapping->start - info->res->start;
                return 0;
        } else {
                info->busy -= nd_mapping->size;
                return 0;
        }
}

/**
 * nd_blk_available_dpa - account the unused dpa of BLK region
 * @nd_region: container of dpa-resource-root + labels
 *
 * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
 * we arrange for them to never start at a lower dpa than the last
 * PMEM allocation in an aliased region.
 */
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct blk_alloc_info info = {
                .nd_mapping = nd_mapping,
                .available = nd_mapping->size,
                .res = NULL,
        };
        struct resource *res;

        if (!ndd)
                return 0;

        device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);

        /* now account for busy blk allocations in unaliased dpa */
        for_each_dpa_resource(ndd, res) {
                if (strncmp(res->name, "blk", 3) != 0)
                        continue;

                info.res = res;
                info.busy = resource_size(res);
                device_for_each_child(&nvdimm_bus->dev, &info, blk_dpa_busy);
                info.available -= info.busy;
        }

        return info.available;
}

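/*
 * Net effect of the two passes above: BLK capacity starts at the full
 * mapping size, alias_dpa_busy() subtracts space reserved for aliased
 * PMEM, and blk_dpa_busy() subtracts this dimm's own BLK allocations
 * that fall in unaliased dpa.
 */
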
/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 * @overlap: calculate available space assuming this level of overlap
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set and truncate the available size at the lowest BLK
 * overlap point.
 *
 * The expectation is that this routine is called multiple times as it
 * probes for the largest BLK encroachment for any single member DIMM of
 * the interleave set.  Once that value is determined the PMEM-limit for
 * the set can be established.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, resource_size_t *overlap)
{
        resource_size_t map_start, map_end, busy = 0, available, blk_start;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct resource *res;
        const char *reason;

        if (!ndd)
                return 0;

        map_start = nd_mapping->start;
        map_end = map_start + nd_mapping->size - 1;
        blk_start = max(map_start, map_end + 1 - *overlap);
        for_each_dpa_resource(ndd, res) {
                if (res->start >= map_start && res->start < map_end) {
                        if (strncmp(res->name, "blk", 3) == 0)
                                blk_start = min(blk_start,
                                                max(map_start, res->start));
                        else if (res->end > map_end) {
                                reason = "misaligned to iset";
                                goto err;
                        } else
                                busy += resource_size(res);
                } else if (res->end >= map_start && res->end <= map_end) {
                        if (strncmp(res->name, "blk", 3) == 0) {
                                /*
                                 * If a BLK allocation overlaps the start of
                                 * PMEM the entire interleave set may now only
                                 * be used for BLK.
                                 */
                                blk_start = map_start;
                        } else
                                busy += resource_size(res);
                } else if (map_start > res->start && map_start < res->end) {
                        /* total eclipse of the mapping */
                        busy += nd_mapping->size;
                        blk_start = map_start;
                }
        }

        *overlap = map_end + 1 - blk_start;
        available = blk_start - map_start;
        if (busy < available)
                return available - busy;
        return 0;

 err:
        nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
        return 0;
}

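/*
 * Worked example (illustrative numbers): for a 1G mapping starting at
 * dpa 0 with a single "blk" allocation covering the top 256M and no
 * incoming *overlap, blk_start settles at 768M; *overlap is returned
 * as 256M and, with no PMEM allocations (busy == 0), the function
 * reports 768M of available PMEM capacity.
 */
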
void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
        WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
        kfree(res->name);
        __release_region(&ndd->dpa, res->start, resource_size(res));
}

struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id, resource_size_t start,
                resource_size_t n)
{
        char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
        struct resource *res;

        if (!name)
                return NULL;

        WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
        res = __request_region(&ndd->dpa, start, n, name, 0);
        if (!res)
                kfree(name);
        return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id)
{
        resource_size_t allocated = 0;
        struct resource *res;

        for_each_dpa_resource(ndd, res)
                if (strcmp(res->name, label_id->id) == 0)
                        allocated += resource_size(res);

        return allocated;
}

static int count_dimms(struct device *dev, void *c)
{
        int *count = c;

        if (is_nvdimm(dev))
                (*count)++;
        return 0;
}

int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
        int count = 0;

        /* Flush any possible dimm registration failures */
        nd_synchronize();

        device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
        dev_dbg(&nvdimm_bus->dev, "%s: count: %d\n", __func__, count);
        if (count != dimm_count)
                return -ENXIO;
        return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);

void __exit nvdimm_devs_exit(void)
{
        ida_destroy(&dimm_ida);
}