/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "nd.h"
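/* allocator for "nmem%d" device ids; an id is released in nvdimm_release() */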
static DEFINE_IDA(dimm_ida);
/*
 * Retrieve bus and dimm handle and return whether this bus supports
 * get_config_data commands
 */
int nvdimm_check_config_data(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        if (!nvdimm->cmd_mask ||
            !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
                if (nvdimm->flags & NDD_ALIASING)
                        return -ENXIO;
                else
                        return -ENOTTY;
        }

        return 0;
}
static int validate_dimm(struct nvdimm_drvdata *ndd)
{
        int rc;

        if (!ndd)
                return -EINVAL;

        rc = nvdimm_check_config_data(ndd->dev);
        if (rc)
                dev_dbg(ndd->dev, "%pf: %s error: %d\n",
                                __builtin_return_address(0), __func__, rc);
        return rc;
}
/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm driver-data to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
        struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nvdimm_bus_descriptor *nd_desc;
        int rc = validate_dimm(ndd);

        if (rc)
                return rc;

        if (cmd->config_size)
                return 0; /* already valid */

        memset(cmd, 0, sizeof(*cmd));
        nd_desc = nvdimm_bus->nd_desc;
        return nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                        ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), NULL);
}
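/*
 * Cache the dimm's label configuration area in ndd->data, reading it in
 * chunks no larger than min(PAGE_SIZE, nsarea.max_xfer), one
 * ND_CMD_GET_CONFIG_DATA call to the bus provider per chunk.
 */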
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nd_cmd_get_config_data_hdr *cmd;
        struct nvdimm_bus_descriptor *nd_desc;
        int rc = validate_dimm(ndd);
        u32 max_cmd_size, config_size;
        size_t offset;

        if (rc)
                return rc;
        if (ndd->data)
                return 0;

        if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0
                        || ndd->nsarea.config_size < ND_LABEL_MIN_SIZE) {
                dev_dbg(ndd->dev, "failed to init config data area: (%d:%d)\n",
                                ndd->nsarea.max_xfer, ndd->nsarea.config_size);
                return -ENXIO;
        }

        ndd->data = kmalloc(ndd->nsarea.config_size, GFP_KERNEL);
        if (!ndd->data)
                ndd->data = vmalloc(ndd->nsarea.config_size);
        if (!ndd->data)
                return -ENOMEM;

        max_cmd_size = min_t(u32, PAGE_SIZE, ndd->nsarea.max_xfer);
        cmd = kzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        nd_desc = nvdimm_bus->nd_desc;
        for (config_size = ndd->nsarea.config_size, offset = 0;
                        config_size; config_size -= cmd->in_length,
                        offset += cmd->in_length) {
                cmd->in_length = min(config_size, max_cmd_size);
                cmd->in_offset = offset;
                rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                                ND_CMD_GET_CONFIG_DATA, cmd,
                                cmd->in_length + sizeof(*cmd), NULL);
                if (rc || cmd->status) {
                        rc = -ENXIO;
                        break;
                }
                memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
        }
        dev_dbg(ndd->dev, "%s: len: %zu rc: %d\n", __func__, offset, rc);
        kfree(cmd);

        return rc;
}
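/*
 * Write-side counterpart of nvdimm_init_config_data(): the payload is
 * pushed in max_xfer-bounded chunks, and each ND_CMD_SET_CONFIG_DATA
 * buffer ends with a status dword that the bus provider fills in.
 */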
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
                void *buf, size_t len)
{
        int rc = validate_dimm(ndd);
        size_t max_cmd_size, buf_offset;
        struct nd_cmd_set_config_hdr *cmd;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

        if (rc)
                return rc;
        if (!ndd->data)
                return -ENXIO;
        if (offset + len > ndd->nsarea.config_size)
                return -ENXIO;

        max_cmd_size = min_t(u32, PAGE_SIZE, len);
        max_cmd_size = min_t(u32, max_cmd_size, ndd->nsarea.max_xfer);
        cmd = kzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        for (buf_offset = 0; len; len -= cmd->in_length,
                        buf_offset += cmd->in_length) {
                size_t cmd_size;
                u32 *status;

                cmd->in_offset = offset + buf_offset;
                cmd->in_length = min(max_cmd_size, len);
                memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

                /* status is output in the last 4-bytes of the command buffer */
                cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);
                status = ((void *) cmd) + cmd_size - sizeof(u32);

                rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                                ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, NULL);
                if (rc || *status) {
                        rc = rc ? rc : -ENXIO;
                        break;
                }
        }
        kfree(cmd);

        return rc;
}
static void nvdimm_release(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        ida_simple_remove(&dimm_ida, nvdimm->id);
        kfree(nvdimm);
}

static struct device_type nvdimm_device_type = {
        .name = "nvdimm",
        .release = nvdimm_release,
};
bool is_nvdimm(struct device *dev)
{
        return dev->type == &nvdimm_device_type;
}

struct nvdimm *to_nvdimm(struct device *dev)
{
        struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

        WARN_ON(!is_nvdimm(dev));
        return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);
struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
{
        struct nd_region *nd_region = &ndbr->nd_region;
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];

        return nd_mapping->nvdimm;
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);
struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
        struct nvdimm *nvdimm = nd_mapping->nvdimm;

        WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

        return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);
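/*
 * Final kref_put() callback for an nvdimm_drvdata: drop all DPA
 * reservations under the bus lock, free the cached label area
 * (kmalloc'd or vmalloc'd, hence kvfree), and put the device.
 */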
void nvdimm_drvdata_release(struct kref *kref)
{
        struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
        struct device *dev = ndd->dev;
        struct resource *res, *_r;

        dev_dbg(dev, "%s\n", __func__);

        nvdimm_bus_lock(dev);
        for_each_dpa_resource_safe(ndd, res, _r)
                nvdimm_free_dpa(ndd, res);
        nvdimm_bus_unlock(dev);

        kvfree(ndd->data);
        kfree(ndd);
        put_device(dev);
}
void get_ndd(struct nvdimm_drvdata *ndd)
{
        kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
        if (ndd)
                kref_put(&ndd->kref, nvdimm_drvdata_release);
}
const char *nvdimm_name(struct nvdimm *nvdimm)
{
        return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
        return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
        return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
        if (nvdimm)
                return nvdimm->provider_data;
        return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);
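/* show the provider-advertised command set as a space-separated list */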
static ssize_t commands_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);
        int cmd, len = 0;

        if (!nvdimm->cmd_mask)
                return sprintf(buf, "\n");

        for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
                len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
        len += sprintf(buf + len, "\n");
        return len;
}
static DEVICE_ATTR_RO(commands);
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
                char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        /*
         * The state may be in the process of changing; userspace should
         * quiesce probing if it wants a static answer
         */
        nvdimm_bus_lock(dev);
        nvdimm_bus_unlock(dev);
        return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
                        ? "active" : "idle");
}
static DEVICE_ATTR_RO(state);
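/*
 * Free label slots in the on-dimm label index. One slot is held back
 * (the nfree-- below), presumably so an in-flight label update always
 * has a slot to land in; the underflow check guards nfree == 0.
 */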
static ssize_t available_slots_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
        ssize_t rc;
        u32 nfree;

        if (!ndd)
                return -ENXIO;

        nvdimm_bus_lock(dev);
        nfree = nd_label_nfree(ndd);
        if (nfree - 1 > nfree) {
                dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
                nfree = 0;
        } else
                nfree--;
        rc = sprintf(buf, "%d\n", nfree);
        nvdimm_bus_unlock(dev);
        return rc;
}
static DEVICE_ATTR_RO(available_slots);
static struct attribute *nvdimm_attributes[] = {
        &dev_attr_state.attr,
        &dev_attr_commands.attr,
        &dev_attr_available_slots.attr,
        NULL,
};

struct attribute_group nvdimm_attribute_group = {
        .attrs = nvdimm_attributes,
};
EXPORT_SYMBOL_GPL(nvdimm_attribute_group);
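/*
 * Allocate, initialize, and register an "nmem%d" device for a dimm on
 * @nvdimm_bus. @provider_data is the bus provider's opaque handle,
 * @cmd_mask its supported commands, and @flush_wpq its write-pending-
 * queue flush hint resources (if any).
 *
 * Hypothetical provider call sketch:
 *
 *      nvdimm = nvdimm_create(nvdimm_bus, my_dimm, my_dimm_groups,
 *                      NDD_ALIASING, cmd_mask, 0, NULL);
 */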
struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
                const struct attribute_group **groups, unsigned long flags,
                unsigned long cmd_mask, int num_flush,
                struct resource *flush_wpq)
{
        struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
        struct device *dev;

        if (!nvdimm)
                return NULL;

        nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
        if (nvdimm->id < 0) {
                kfree(nvdimm);
                return NULL;
        }
        nvdimm->provider_data = provider_data;
        nvdimm->flags = flags;
        nvdimm->cmd_mask = cmd_mask;
        nvdimm->num_flush = num_flush;
        nvdimm->flush_wpq = flush_wpq;
        atomic_set(&nvdimm->busy, 0);
        dev = &nvdimm->dev;
        dev_set_name(dev, "nmem%d", nvdimm->id);
        dev->parent = &nvdimm_bus->dev;
        dev->type = &nvdimm_device_type;
        dev->devt = MKDEV(nvdimm_major, nvdimm->id);
        dev->groups = groups;
        nd_device_register(dev);

        return nvdimm;
}
EXPORT_SYMBOL_GPL(nvdimm_create);
struct blk_alloc_info {
        struct nd_mapping *nd_mapping;
        resource_size_t available, busy;
        struct resource *res;
};
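/*
 * device_for_each_child() callback: for each pmem region that has a
 * mapping on the same dimm as info->nd_mapping, push blk_start past the
 * last pmem allocation in that mapping and count non-pmem reservations
 * above blk_start as busy, shrinking info->available accordingly.
 */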
static int alias_dpa_busy(struct device *dev, void *data)
{
        resource_size_t map_end, blk_start, new, busy;
        struct blk_alloc_info *info = data;
        struct nd_mapping *nd_mapping;
        struct nd_region *nd_region;
        struct nvdimm_drvdata *ndd;
        struct resource *res;
        int i;

        if (!is_nd_pmem(dev))
                return 0;

        nd_region = to_nd_region(dev);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                nd_mapping = &nd_region->mapping[i];
                if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
                        break;
        }

        if (i >= nd_region->ndr_mappings)
                return 0;

        ndd = to_ndd(nd_mapping);
        map_end = nd_mapping->start + nd_mapping->size - 1;
        blk_start = nd_mapping->start;
 retry:
        /*
         * Find the free dpa from the end of the last pmem allocation to
         * the end of the interleave-set mapping that is not already
         * covered by a blk allocation.
         */
        busy = 0;
        for_each_dpa_resource(ndd, res) {
                if ((res->start >= blk_start && res->start < map_end)
                                || (res->end >= blk_start
                                        && res->end <= map_end)) {
                        if (strncmp(res->name, "pmem", 4) == 0) {
                                new = max(blk_start, min(map_end + 1,
                                                        res->end + 1));
                                if (new != blk_start) {
                                        blk_start = new;
                                        goto retry;
                                }
                        } else
                                busy += min(map_end, res->end)
                                        - max(nd_mapping->start, res->start) + 1;
                } else if (nd_mapping->start > res->start
                                && map_end < res->end) {
                        /* total eclipse of the PMEM region mapping */
                        busy += nd_mapping->size;
                        break;
                }
        }

        info->available -= blk_start - nd_mapping->start + busy;
        return 0;
}
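/*
 * device_for_each_child() callback paired with alias_dpa_busy(): given
 * a blk reservation in info->res, reconcile info->busy against each
 * pmem region mapping that aliases the same dimm. A reservation wholly
 * contained in the aliased mapping was already accounted by
 * alias_dpa_busy(), so busy drops to zero and the walk stops.
 */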
static int blk_dpa_busy(struct device *dev, void *data)
{
        struct blk_alloc_info *info = data;
        struct nd_mapping *nd_mapping;
        struct nd_region *nd_region;
        resource_size_t map_end;
        int i;

        if (!is_nd_pmem(dev))
                return 0;

        nd_region = to_nd_region(dev);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                nd_mapping = &nd_region->mapping[i];
                if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
                        break;
        }

        if (i >= nd_region->ndr_mappings)
                return 0;

        map_end = nd_mapping->start + nd_mapping->size - 1;
        if (info->res->start >= nd_mapping->start
                        && info->res->start < map_end) {
                if (info->res->end <= map_end) {
                        info->busy = 0;
                        return 1;
                } else {
                        info->busy -= info->res->end - map_end;
                        return 0;
                }
        } else if (info->res->end >= nd_mapping->start
                        && info->res->end <= map_end) {
                info->busy -= nd_mapping->start - info->res->start;
                return 0;
        } else {
                info->busy -= nd_mapping->size;
                return 0;
        }
}
/**
 * nd_blk_available_dpa - account the unused dpa of BLK region
 * @nd_region: blk region whose first mapping holds the dpa-resource-root + labels
 *
 * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
 * we arrange for them to never start at a lower dpa than the last
 * PMEM allocation in an aliased region.
 */
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct blk_alloc_info info = {
                .nd_mapping = nd_mapping,
                .available = nd_mapping->size,
        };
        struct resource *res;

        if (!ndd)
                return 0;

        device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);

        /* now account for busy blk allocations in unaliased dpa */
        for_each_dpa_resource(ndd, res) {
                if (strncmp(res->name, "blk", 3) != 0)
                        continue;

                info.res = res;
                info.busy = resource_size(res);
                device_for_each_child(&nvdimm_bus->dev, &info, blk_dpa_busy);
                info.available -= info.busy;
        }

        return info.available;
}
/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_mapping: container of dpa-resource-root + labels
 * @nd_region: constrain available space check to this reference region
 * @overlap: calculate available space assuming this level of overlap
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set and truncate the available size at the lowest BLK
 * overlap point.
 *
 * The expectation is that this routine is called multiple times as it
 * probes for the largest BLK encroachment for any single member DIMM of
 * the interleave set.  Once that value is determined the PMEM-limit for
 * the set can be established.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, resource_size_t *overlap)
{
        resource_size_t map_start, map_end, busy = 0, available, blk_start;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct resource *res;
        const char *reason;

        if (!ndd)
                return 0;

        map_start = nd_mapping->start;
        map_end = map_start + nd_mapping->size - 1;
        blk_start = max(map_start, map_end + 1 - *overlap);
        for_each_dpa_resource(ndd, res) {
                if (res->start >= map_start && res->start < map_end) {
                        if (strncmp(res->name, "blk", 3) == 0)
                                blk_start = min(blk_start,
                                                max(map_start, res->start));
                        else if (res->end > map_end) {
                                reason = "misaligned to iset";
                                goto err;
                        } else
                                busy += resource_size(res);
                } else if (res->end >= map_start && res->end <= map_end) {
                        if (strncmp(res->name, "blk", 3) == 0) {
                                /*
                                 * If a BLK allocation overlaps the start of
                                 * PMEM the entire interleave set may now only
                                 * be used for BLK.
                                 */
                                blk_start = map_start;
                        } else
                                busy += resource_size(res);
                } else if (map_start > res->start && map_start < res->end) {
                        /* total eclipse of the mapping */
                        busy += nd_mapping->size;
                        blk_start = map_start;
                }
        }

        *overlap = map_end + 1 - blk_start;
        available = blk_start - map_start;
        if (busy < available)
                return available - busy;
        return 0;

 err:
        nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
        return 0;
}
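/* release a reservation made by nvdimm_allocate_dpa(), freeing its name */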
void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
        WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
        kfree(res->name);
        __release_region(&ndd->dpa, res->start, resource_size(res));
}
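/*
 * Reserve [start, start + n) in the dimm-physical-address space,
 * naming the region after @label_id so per-label usage can be summed
 * by nvdimm_allocated_dpa().
 */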
struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id, resource_size_t start,
                resource_size_t n)
{
        char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
        struct resource *res;

        if (!name)
                return NULL;

        WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
        res = __request_region(&ndd->dpa, start, n, name, 0);
        if (!res)
                kfree(name);
        return res;
}
/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id)
{
        resource_size_t allocated = 0;
        struct resource *res;

        for_each_dpa_resource(ndd, res)
                if (strcmp(res->name, label_id->id) == 0)
                        allocated += resource_size(res);

        return allocated;
}
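/*
 * Helpers for bus providers to confirm that every expected dimm
 * registered: count nvdimm children after flushing async registration.
 */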
static int count_dimms(struct device *dev, void *c)
{
        int *count = c;

        if (is_nvdimm(dev))
                (*count)++;
        return 0;
}

int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
        int count = 0;

        /* Flush any possible dimm registration failures */
        nd_synchronize();

        device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
        dev_dbg(&nvdimm_bus->dev, "%s: count: %d\n", __func__, count);
        if (count != dimm_count)
                return -ENXIO;
        return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);
void __exit nvdimm_devs_exit(void)
{
        ida_destroy(&dimm_ida);
}