2 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 #include <linux/platform_device.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/libnvdimm.h>
17 #include <linux/vmalloc.h>
18 #include <linux/device.h>
19 #include <linux/module.h>
20 #include <linux/mutex.h>
21 #include <linux/ndctl.h>
22 #include <linux/sizes.h>
23 #include <linux/list.h>
24 #include <linux/slab.h>
27 #include "nfit_test.h"
30 * Generate an NFIT table to describe the following topology:
32 * BUS0: Interleaved PMEM regions, and aliasing with BLK regions
34 * (a) (b) DIMM BLK-REGION
35 * +----------+--------------+----------+---------+
36 * +------+ | blk2.0 | pm0.0 | blk2.1 | pm1.0 | 0 region2
37 * | imc0 +--+- - - - - region0 - - - -+----------+ +
38 * +--+---+ | blk3.0 | pm0.0 | blk3.1 | pm1.0 | 1 region3
39 * | +----------+--------------v----------v v
43 * | +-------------------------^----------^ ^
44 * +--+---+ | blk4.0 | pm1.0 | 2 region4
45 * | imc1 +--+-------------------------+----------+ +
46 * +------+ | blk5.0 | pm1.0 | 3 region5
47 * +-------------------------+----------+-+-------+
51 * +--+---+ (Hotplug DIMM)
52 * | +----------------------------------------------+
53 * +--+---+ | blk6.0/pm7.0 | 4 region6/7
54 * | imc0 +--+----------------------------------------------+
58 * *) In this layout we have four dimms and two memory controllers in one
59 * socket. Each unique interface (BLK or PMEM) to DPA space
60 * is identified by a region device with a dynamically assigned id.
62 * *) The first portion of dimm0 and dimm1 are interleaved as REGION0.
63 * A single PMEM namespace "pm0.0" is created using half of the
64 * REGION0 SPA-range. REGION0 spans dimm0 and dimm1. PMEM namespace
65 * allocates from the bottom of a region. The unallocated
66 * portion of REGION0 aliases with REGION2 and REGION3. That
67 * unallocated capacity is reclaimed as BLK namespaces ("blk2.0" and
68 * "blk3.0") starting at the base of each DIMM to offset (a) in those
69 * DIMMs. "pm0.0", "blk2.0" and "blk3.0" are free-form readable
70 * names that can be assigned to a namespace.
72 * *) In the last portion of dimm0 and dimm1 we have an interleaved
73 * SPA range, REGION1, that spans those two dimms as well as dimm2
74 * and dimm3. Some of REGION1 is allocated to a PMEM namespace named
75 * "pm1.0"; the rest is reclaimed in 4 BLK namespaces (for each
76 * dimm in the interleave set), "blk2.1", "blk3.1", "blk4.0", and
79 * *) The portion of dimm2 and dimm3 that do not participate in the
80 * REGION1 interleaved SPA range (i.e. the DPA address below offset
81 * (b) are also included in the "blk4.0" and "blk5.0" namespaces.
82 * Note, that BLK namespaces need not be contiguous in DPA-space, and
83 * can consume aliased capacity from multiple interleave sets.
85 * BUS1: Legacy NVDIMM (single contiguous range)
88 * +---------------------+
89 * |---------------------|
91 * |---------------------|
92 * +---------------------+
94 * *) A NFIT-table may describe a simple system-physical-address range
95 * with no BLK aliasing. This type of region may optionally
96 * reference an NVDIMM.
102 NUM_SPA = NUM_PM + NUM_DCR + NUM_BDW,
103 NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */ + 4 /* spa1 iset */,
105 LABEL_SIZE = SZ_128K,
106 SPA0_SIZE = DIMM_SIZE,
107 SPA1_SIZE = DIMM_SIZE*2,
108 SPA2_SIZE = DIMM_SIZE,
111 NUM_NFITS = 2, /* permit testing multiple NFITs per system */
114 struct nfit_test_dcr {
117 __u8 aperature[BDW_SIZE];
/*
 * Build an NFIT DIMM device handle from its topology coordinates
 * (node, socket, memory-controller, channel, dimm), packed per the
 * ACPI NFIT device-handle field layout: node[27:16], socket[15:12],
 * imc[11:8], channel[7:4], dimm[3:0].
 *
 * Fix: every macro parameter is now fully parenthesized so an
 * expression argument (e.g. "16 | 1") is masked as a whole instead of
 * being re-associated by C operator precedence (CERT PRE01-C).
 */
#define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm)		\
	((((node) & 0xfff) << 16) | (((socket) & 0xf) << 12)	\
	 | (((imc) & 0xf) << 8) | (((chan) & 0xf) << 4)		\
	 | ((dimm) & 0xf))
/*
 * Device handles for the five simulated DIMMs: four on bus0 (two per
 * memory controller) plus one on socket 1 used for the hotplug test.
 * NOTE(review): fragmentary extract — the closing "};" of this
 * initializer is elided in this view.
 */
124 static u32 handle[NUM_DCR] = {
125 [0] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
126 [1] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
127 [2] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
128 [3] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
129 [4] = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
133 struct acpi_nfit_desc acpi_desc;
134 struct platform_device pdev;
135 struct list_head resources;
142 dma_addr_t *dimm_dma;
144 dma_addr_t *flush_dma;
146 dma_addr_t *label_dma;
148 dma_addr_t *spa_set_dma;
149 struct nfit_test_dcr **dcr;
151 int (*alloc)(struct nfit_test *t);
152 void (*setup)(struct nfit_test *t);
155 struct nd_cmd_ars_status *ars_status;
156 unsigned long deadline;
/*
 * Map a generic struct device back to its containing nfit_test
 * instance via the embedded platform_device.
 * NOTE(review): braces elided in this fragmentary extract.
 */
161 static struct nfit_test *to_nfit_test(struct device *dev)
163 	struct platform_device *pdev = to_platform_device(dev);
165 	return container_of(pdev, struct nfit_test, pdev);
/*
 * DSM emulation: report the DIMM label-area size. Rejects replies
 * that cannot fit in buf_len, then advertises LABEL_SIZE of config
 * data with a 4K maximum transfer. NOTE(review): the early-return
 * and final return statements are elided in this extract.
 */
168 static int nfit_test_cmd_get_config_size(struct nd_cmd_get_config_size *nd_cmd,
169 		unsigned int buf_len)
171 	if (buf_len < sizeof(*nd_cmd))
175 	nd_cmd->config_size = LABEL_SIZE;
176 	nd_cmd->max_xfer = SZ_4K;
/*
 * DSM emulation: read from the simulated label storage area.
 * Validates buffer size and offset, clamps the copy length to the
 * label area, copies into the caller's out_buf, and computes rc as
 * the residual buffer space. NOTE(review): error-return branches are
 * elided in this fragmentary extract.
 */
181 static int nfit_test_cmd_get_config_data(struct nd_cmd_get_config_data_hdr
182 		*nd_cmd, unsigned int buf_len, void *label)
184 	unsigned int len, offset = nd_cmd->in_offset;
187 	if (buf_len < sizeof(*nd_cmd))
189 	if (offset >= LABEL_SIZE)
191 	if (nd_cmd->in_length + sizeof(*nd_cmd) > buf_len)
195 	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
196 	memcpy(nd_cmd->out_buf, label + offset, len);
197 	rc = buf_len - sizeof(*nd_cmd) - len;
/*
 * DSM emulation: write to the simulated label storage area.
 * The "+ 4" accounts for the trailing 4-byte status word that
 * follows the write payload; "status" points at it inside the
 * caller's buffer. NOTE(review): error-return branches and the
 * status-word store are elided in this fragmentary extract.
 */
202 static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr *nd_cmd,
203 		unsigned int buf_len, void *label)
205 	unsigned int len, offset = nd_cmd->in_offset;
209 	if (buf_len < sizeof(*nd_cmd))
211 	if (offset >= LABEL_SIZE)
213 	if (nd_cmd->in_length + sizeof(*nd_cmd) + 4 > buf_len)
216 	status = (void *)nd_cmd + nd_cmd->in_length + sizeof(*nd_cmd);
218 	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
219 	memcpy(label + offset, nd_cmd->in_buf, len);
220 	rc = buf_len - sizeof(*nd_cmd) - (len + 4);
225 #define NFIT_TEST_ARS_RECORDS 4
226 #define NFIT_TEST_CLEAR_ERR_UNIT 256
/*
 * DSM emulation: Address Range Scrub capability query. Advertises
 * room for NFIT_TEST_ARS_RECORDS error records, both persistent and
 * volatile scrub support (encoded in status bits 16+), and the
 * clear-error granularity. NOTE(review): return paths elided.
 */
228 static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd,
229 		unsigned int buf_len)
231 	if (buf_len < sizeof(*nd_cmd))
234 	nd_cmd->max_ars_out = sizeof(struct nd_cmd_ars_status)
235 		+ NFIT_TEST_ARS_RECORDS * sizeof(struct nd_ars_record);
236 	nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16;
237 	nd_cmd->clear_err_unit = NFIT_TEST_CLEAR_ERR_UNIT;
243 * Initialize the ars_state to return an ars_result 1 second in the future with
244 * a 4K error range in the middle of the requested address range.
/*
 * Populate ars_state with a canned scrub result that "completes"
 * one second from now (deadline = jiffies + 1*HZ): a single
 * persistent-memory error record of SZ_4K placed at the midpoint
 * of the [addr, addr+len) range. Callers are expected to hold
 * ars_state->lock — TODO confirm against the full file.
 */
246 static void post_ars_status(struct ars_state *ars_state, u64 addr, u64 len)
248 	struct nd_cmd_ars_status *ars_status;
249 	struct nd_ars_record *ars_record;
251 	ars_state->deadline = jiffies + 1*HZ;
252 	ars_status = ars_state->ars_status;
253 	ars_status->status = 0;
254 	ars_status->out_length = sizeof(struct nd_cmd_ars_status)
255 		+ sizeof(struct nd_ars_record);
256 	ars_status->address = addr;
257 	ars_status->length = len;
258 	ars_status->type = ND_ARS_PERSISTENT;
259 	ars_status->num_records = 1;
260 	ars_record = &ars_status->records[0];
261 	ars_record->handle = 0;
262 	ars_record->err_address = addr + len / 2;
263 	ars_record->length = SZ_4K;
/*
 * DSM emulation: start an Address Range Scrub. If a simulated scrub
 * is still "running" (deadline not yet reached) report busy;
 * otherwise accept the request and post a fresh canned result via
 * post_ars_status() under ars_state->lock. NOTE(review): the else
 * branch structure and return statements are elided in this extract.
 */
266 static int nfit_test_cmd_ars_start(struct ars_state *ars_state,
267 		struct nd_cmd_ars_start *ars_start, unsigned int buf_len,
270 	if (buf_len < sizeof(*ars_start))
273 	spin_lock(&ars_state->lock);
274 	if (time_before(jiffies, ars_state->deadline)) {
275 		ars_start->status = NFIT_ARS_START_BUSY;
278 		ars_start->status = 0;
279 		ars_start->scrub_time = 1;
280 		post_ars_status(ars_state, ars_start->address,
284 	spin_unlock(&ars_state->lock);
/*
 * DSM emulation: query Address Range Scrub status. While the
 * simulated scrub is still in flight return a zeroed reply marked
 * busy; after the deadline, copy out the canned result posted by
 * post_ars_status(). Both paths run under ars_state->lock.
 * NOTE(review): return statements elided in this extract.
 */
289 static int nfit_test_cmd_ars_status(struct ars_state *ars_state,
290 		struct nd_cmd_ars_status *ars_status, unsigned int buf_len,
293 	if (buf_len < ars_state->ars_status->out_length)
296 	spin_lock(&ars_state->lock);
297 	if (time_before(jiffies, ars_state->deadline)) {
298 		memset(ars_status, 0, buf_len);
299 		ars_status->status = NFIT_ARS_STATUS_BUSY;
300 		ars_status->out_length = sizeof(*ars_status);
303 	memcpy(ars_status, ars_state->ars_status,
304 			ars_state->ars_status->out_length);
307 	spin_unlock(&ars_state->lock);
/*
 * DSM emulation: clear a media error. Requires the address and
 * length to be aligned to NFIT_TEST_CLEAR_ERR_UNIT, then reports
 * unconditional success ("all clear") — enough for the badblocks
 * bookkeeping even though a rescan would re-discover the error.
 * NOTE(review): error-return branches elided in this extract.
 */
311 static int nfit_test_cmd_clear_error(struct nd_cmd_clear_error *clear_err,
312 		unsigned int buf_len, int *cmd_rc)
314 	const u64 mask = NFIT_TEST_CLEAR_ERR_UNIT - 1;
315 	if (buf_len < sizeof(*clear_err))
318 	if ((clear_err->address & mask) || (clear_err->length & mask))
322 	 * Report 'all clear' success for all commands even though a new
323 	 * scrub will find errors again. This is enough to have the
324 	 * error removed from the 'badblocks' tracking in the pmem
327 	clear_err->status = 0;
328 	clear_err->cleared = clear_err->length;
/*
 * Top-level DSM dispatcher registered with the nvdimm bus. Two
 * regimes: per-DIMM commands (nvdimm != NULL) are matched against
 * the DIMM's cmd_mask and its label area looked up by device handle;
 * bus-level ARS commands consult nd_desc->cmd_mask and the shared
 * ars_state. NOTE(review): several case labels, default branches,
 * and closing braces are elided in this fragmentary extract.
 */
333 static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
334 		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
335 		unsigned int buf_len, int *cmd_rc)
337 	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
338 	struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc);
339 	int i, rc = 0, __cmd_rc;
346 		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
347 		unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
349 		if (!nfit_mem || !test_bit(cmd, &cmd_mask))
352 		/* lookup label space for the given dimm */
353 		for (i = 0; i < ARRAY_SIZE(handle); i++)
354 			if (__to_nfit_memdev(nfit_mem)->device_handle ==
357 		if (i >= ARRAY_SIZE(handle))
361 		case ND_CMD_GET_CONFIG_SIZE:
362 			rc = nfit_test_cmd_get_config_size(buf, buf_len);
364 		case ND_CMD_GET_CONFIG_DATA:
365 			rc = nfit_test_cmd_get_config_data(buf, buf_len,
368 		case ND_CMD_SET_CONFIG_DATA:
369 			rc = nfit_test_cmd_set_config_data(buf, buf_len,
376 		struct ars_state *ars_state = &t->ars_state;
378 		if (!nd_desc || !test_bit(cmd, &nd_desc->cmd_mask))
383 			rc = nfit_test_cmd_ars_cap(buf, buf_len);
385 		case ND_CMD_ARS_START:
386 			rc = nfit_test_cmd_ars_start(ars_state, buf, buf_len,
389 		case ND_CMD_ARS_STATUS:
390 			rc = nfit_test_cmd_ars_status(ars_state, buf, buf_len,
393 		case ND_CMD_CLEAR_ERROR:
394 			rc = nfit_test_cmd_clear_error(buf, buf_len, cmd_rc);
404 static DEFINE_SPINLOCK(nfit_test_lock);
405 static struct nfit_test *instances[NUM_NFITS];
/*
 * devm action: tear down one tracked test resource. Unlinks it from
 * the instance list under nfit_test_lock, then frees the backing
 * buffer by whichever allocator produced it — vfree() for vmalloc
 * buffers, dma_free_coherent() otherwise. NOTE(review): the else
 * keyword and trailing frees are elided in this extract.
 */
407 static void release_nfit_res(void *data)
409 	struct nfit_test_resource *nfit_res = data;
410 	struct resource *res = nfit_res->res;
412 	spin_lock(&nfit_test_lock);
413 	list_del(&nfit_res->list);
414 	spin_unlock(&nfit_test_lock);
416 	if (is_vmalloc_addr(nfit_res->buf))
417 		vfree(nfit_res->buf);
419 		dma_free_coherent(nfit_res->dev, resource_size(res),
420 				nfit_res->buf, res->start);
/*
 * Common tail for test allocations: wrap a pre-allocated buffer in a
 * struct nfit_test_resource + struct resource pair, register a devm
 * action so release_nfit_res() runs on device teardown, and link it
 * into the instance's resource list for nfit_test_lookup(). On the
 * error path a non-vmalloc buffer is returned to the DMA allocator.
 * NOTE(review): several statements (res/buf field setup, gotos,
 * kfree of res/nfit_res) are elided in this fragmentary extract.
 */
425 static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
428 	struct device *dev = &t->pdev.dev;
429 	struct resource *res = kzalloc(sizeof(*res) * 2, GFP_KERNEL);
430 	struct nfit_test_resource *nfit_res = kzalloc(sizeof(*nfit_res),
434 	if (!res || !buf || !nfit_res)
436 	rc = devm_add_action(dev, release_nfit_res, nfit_res);
439 	INIT_LIST_HEAD(&nfit_res->list);
440 	memset(buf, 0, size);
445 	res->end = *dma + size - 1;
447 	spin_lock(&nfit_test_lock);
448 	list_add(&nfit_res->list, &t->resources);
449 	spin_unlock(&nfit_test_lock);
451 	return nfit_res->buf;
453 	if (buf && !is_vmalloc_addr(buf))
454 		dma_free_coherent(dev, size, buf, *dma);
/*
 * Allocate a vmalloc-backed test buffer; its kernel virtual address
 * doubles as the fake DMA/SPA address handed to the NFIT tables.
 */
462 static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma)
464 	void *buf = vmalloc(size);
466 	*dma = (unsigned long) buf;
467 	return __test_alloc(t, size, dma, buf);
/*
 * Allocate a DMA-coherent test buffer (real dma_addr_t) and track it
 * through the common __test_alloc() bookkeeping.
 */
470 static void *test_alloc_coherent(struct nfit_test *t, size_t size,
473 	struct device *dev = &t->pdev.dev;
474 	void *buf = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
476 	return __test_alloc(t, size, dma, buf);
/*
 * Resolve a fake physical address back to its test resource by
 * scanning every instance's resource list; matches either the
 * advertised resource range or the buffer's kernel virtual address.
 * Used by the test harness to intercept ioremap/memremap of the
 * synthetic SPA ranges. NOTE(review): the match bookkeeping and
 * return statements are elided in this fragmentary extract.
 */
479 static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr)
483 	for (i = 0; i < ARRAY_SIZE(instances); i++) {
484 		struct nfit_test_resource *n, *nfit_res = NULL;
485 		struct nfit_test *t = instances[i];
489 		spin_lock(&nfit_test_lock);
490 		list_for_each_entry(n, &t->resources, list) {
491 			if (addr >= n->res->start && (addr < n->res->start
492 						+ resource_size(n->res))) {
495 			} else if (addr >= (unsigned long) n->buf
496 					&& (addr < (unsigned long) n->buf
497 						+ resource_size(n->res))) {
502 		spin_unlock(&nfit_test_lock);
/*
 * Allocate (devm-managed) the canned ARS status reply, sized for
 * NFIT_TEST_ARS_RECORDS records, and initialize the protecting lock.
 * NOTE(review): the -ENOMEM and success returns are elided here.
 */
510 static int ars_state_init(struct device *dev, struct ars_state *ars_state)
512 	ars_state->ars_status = devm_kzalloc(dev,
513 			sizeof(struct nd_cmd_ars_status)
514 			+ sizeof(struct nd_ars_record) * NFIT_TEST_ARS_RECORDS,
516 	if (!ars_state->ars_status)
518 	spin_lock_init(&ars_state->lock);
/*
 * Allocate all backing storage for bus0: the NFIT table buffer
 * (sized for every SPA/MEMDEV/DCR/BDW/FLUSH sub-table), three SPA
 * set buffers, and per-DIMM dimm/label/flush/dcr buffers. Labels are
 * stamped with a recognizable "label%d" string. Finishes by setting
 * up the shared ARS state. NOTE(review): NULL-check early returns
 * are elided in this fragmentary extract.
 */
522 static int nfit_test0_alloc(struct nfit_test *t)
524 	size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA
525 			+ sizeof(struct acpi_nfit_memory_map) * NUM_MEM
526 			+ sizeof(struct acpi_nfit_control_region) * NUM_DCR
527 			+ offsetof(struct acpi_nfit_control_region,
528 					window_size) * NUM_DCR
529 			+ sizeof(struct acpi_nfit_data_region) * NUM_BDW
530 			+ sizeof(struct acpi_nfit_flush_address) * NUM_DCR;
533 	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
536 	t->nfit_size = nfit_size;
538 	t->spa_set[0] = test_alloc_coherent(t, SPA0_SIZE, &t->spa_set_dma[0]);
542 	t->spa_set[1] = test_alloc_coherent(t, SPA1_SIZE, &t->spa_set_dma[1]);
546 	t->spa_set[2] = test_alloc_coherent(t, SPA0_SIZE, &t->spa_set_dma[2]);
550 	for (i = 0; i < NUM_DCR; i++) {
551 		t->dimm[i] = test_alloc(t, DIMM_SIZE, &t->dimm_dma[i]);
555 		t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
558 		sprintf(t->label[i], "label%d", i);
560 		t->flush[i] = test_alloc(t, 8, &t->flush_dma[i]);
565 	for (i = 0; i < NUM_DCR; i++) {
566 		t->dcr[i] = test_alloc(t, LABEL_SIZE, &t->dcr_dma[i]);
571 	return ars_state_init(&t->pdev.dev, &t->ars_state);
/*
 * Allocate backing storage for bus1 (the legacy single-range NVDIMM):
 * a minimal NFIT (one SPA + one MEMDEV + a short DCR) and one SPA set
 * buffer, then initialize ARS state. NOTE(review): NULL-check early
 * returns are elided in this fragmentary extract.
 */
574 static int nfit_test1_alloc(struct nfit_test *t)
576 	size_t nfit_size = sizeof(struct acpi_nfit_system_address)
577 		+ sizeof(struct acpi_nfit_memory_map)
578 		+ offsetof(struct acpi_nfit_control_region, window_size);
580 	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
583 	t->nfit_size = nfit_size;
585 	t->spa_set[0] = test_alloc_coherent(t, SPA2_SIZE, &t->spa_set_dma[0]);
589 	return ars_state_init(&t->pdev.dev, &t->ars_state);
592 static void nfit_test0_setup(struct nfit_test *t)
594 struct acpi_nfit_desc *acpi_desc;
595 struct acpi_nfit_memory_map *memdev;
596 void *nfit_buf = t->nfit_buf;
597 struct acpi_nfit_system_address *spa;
598 struct acpi_nfit_control_region *dcr;
599 struct acpi_nfit_data_region *bdw;
600 struct acpi_nfit_flush_address *flush;
604 * spa0 (interleave first half of dimm0 and dimm1, note storage
605 * does not actually alias the related block-data-window
609 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
610 spa->header.length = sizeof(*spa);
611 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
612 spa->range_index = 0+1;
613 spa->address = t->spa_set_dma[0];
614 spa->length = SPA0_SIZE;
617 * spa1 (interleave last half of the 4 DIMMS, note storage
618 * does not actually alias the related block-data-window
621 spa = nfit_buf + sizeof(*spa);
622 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
623 spa->header.length = sizeof(*spa);
624 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
625 spa->range_index = 1+1;
626 spa->address = t->spa_set_dma[1];
627 spa->length = SPA1_SIZE;
629 /* spa2 (dcr0) dimm0 */
630 spa = nfit_buf + sizeof(*spa) * 2;
631 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
632 spa->header.length = sizeof(*spa);
633 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
634 spa->range_index = 2+1;
635 spa->address = t->dcr_dma[0];
636 spa->length = DCR_SIZE;
638 /* spa3 (dcr1) dimm1 */
639 spa = nfit_buf + sizeof(*spa) * 3;
640 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
641 spa->header.length = sizeof(*spa);
642 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
643 spa->range_index = 3+1;
644 spa->address = t->dcr_dma[1];
645 spa->length = DCR_SIZE;
647 /* spa4 (dcr2) dimm2 */
648 spa = nfit_buf + sizeof(*spa) * 4;
649 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
650 spa->header.length = sizeof(*spa);
651 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
652 spa->range_index = 4+1;
653 spa->address = t->dcr_dma[2];
654 spa->length = DCR_SIZE;
656 /* spa5 (dcr3) dimm3 */
657 spa = nfit_buf + sizeof(*spa) * 5;
658 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
659 spa->header.length = sizeof(*spa);
660 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
661 spa->range_index = 5+1;
662 spa->address = t->dcr_dma[3];
663 spa->length = DCR_SIZE;
665 /* spa6 (bdw for dcr0) dimm0 */
666 spa = nfit_buf + sizeof(*spa) * 6;
667 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
668 spa->header.length = sizeof(*spa);
669 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
670 spa->range_index = 6+1;
671 spa->address = t->dimm_dma[0];
672 spa->length = DIMM_SIZE;
674 /* spa7 (bdw for dcr1) dimm1 */
675 spa = nfit_buf + sizeof(*spa) * 7;
676 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
677 spa->header.length = sizeof(*spa);
678 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
679 spa->range_index = 7+1;
680 spa->address = t->dimm_dma[1];
681 spa->length = DIMM_SIZE;
683 /* spa8 (bdw for dcr2) dimm2 */
684 spa = nfit_buf + sizeof(*spa) * 8;
685 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
686 spa->header.length = sizeof(*spa);
687 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
688 spa->range_index = 8+1;
689 spa->address = t->dimm_dma[2];
690 spa->length = DIMM_SIZE;
692 /* spa9 (bdw for dcr3) dimm3 */
693 spa = nfit_buf + sizeof(*spa) * 9;
694 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
695 spa->header.length = sizeof(*spa);
696 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
697 spa->range_index = 9+1;
698 spa->address = t->dimm_dma[3];
699 spa->length = DIMM_SIZE;
701 offset = sizeof(*spa) * 10;
702 /* mem-region0 (spa0, dimm0) */
703 memdev = nfit_buf + offset;
704 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
705 memdev->header.length = sizeof(*memdev);
706 memdev->device_handle = handle[0];
707 memdev->physical_id = 0;
708 memdev->region_id = 0;
709 memdev->range_index = 0+1;
710 memdev->region_index = 4+1;
711 memdev->region_size = SPA0_SIZE/2;
712 memdev->region_offset = t->spa_set_dma[0];
714 memdev->interleave_index = 0;
715 memdev->interleave_ways = 2;
717 /* mem-region1 (spa0, dimm1) */
718 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map);
719 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
720 memdev->header.length = sizeof(*memdev);
721 memdev->device_handle = handle[1];
722 memdev->physical_id = 1;
723 memdev->region_id = 0;
724 memdev->range_index = 0+1;
725 memdev->region_index = 5+1;
726 memdev->region_size = SPA0_SIZE/2;
727 memdev->region_offset = t->spa_set_dma[0] + SPA0_SIZE/2;
729 memdev->interleave_index = 0;
730 memdev->interleave_ways = 2;
732 /* mem-region2 (spa1, dimm0) */
733 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 2;
734 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
735 memdev->header.length = sizeof(*memdev);
736 memdev->device_handle = handle[0];
737 memdev->physical_id = 0;
738 memdev->region_id = 1;
739 memdev->range_index = 1+1;
740 memdev->region_index = 4+1;
741 memdev->region_size = SPA1_SIZE/4;
742 memdev->region_offset = t->spa_set_dma[1];
743 memdev->address = SPA0_SIZE/2;
744 memdev->interleave_index = 0;
745 memdev->interleave_ways = 4;
747 /* mem-region3 (spa1, dimm1) */
748 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 3;
749 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
750 memdev->header.length = sizeof(*memdev);
751 memdev->device_handle = handle[1];
752 memdev->physical_id = 1;
753 memdev->region_id = 1;
754 memdev->range_index = 1+1;
755 memdev->region_index = 5+1;
756 memdev->region_size = SPA1_SIZE/4;
757 memdev->region_offset = t->spa_set_dma[1] + SPA1_SIZE/4;
758 memdev->address = SPA0_SIZE/2;
759 memdev->interleave_index = 0;
760 memdev->interleave_ways = 4;
762 /* mem-region4 (spa1, dimm2) */
763 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 4;
764 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
765 memdev->header.length = sizeof(*memdev);
766 memdev->device_handle = handle[2];
767 memdev->physical_id = 2;
768 memdev->region_id = 0;
769 memdev->range_index = 1+1;
770 memdev->region_index = 6+1;
771 memdev->region_size = SPA1_SIZE/4;
772 memdev->region_offset = t->spa_set_dma[1] + 2*SPA1_SIZE/4;
773 memdev->address = SPA0_SIZE/2;
774 memdev->interleave_index = 0;
775 memdev->interleave_ways = 4;
777 /* mem-region5 (spa1, dimm3) */
778 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 5;
779 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
780 memdev->header.length = sizeof(*memdev);
781 memdev->device_handle = handle[3];
782 memdev->physical_id = 3;
783 memdev->region_id = 0;
784 memdev->range_index = 1+1;
785 memdev->region_index = 7+1;
786 memdev->region_size = SPA1_SIZE/4;
787 memdev->region_offset = t->spa_set_dma[1] + 3*SPA1_SIZE/4;
788 memdev->address = SPA0_SIZE/2;
789 memdev->interleave_index = 0;
790 memdev->interleave_ways = 4;
792 /* mem-region6 (spa/dcr0, dimm0) */
793 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 6;
794 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
795 memdev->header.length = sizeof(*memdev);
796 memdev->device_handle = handle[0];
797 memdev->physical_id = 0;
798 memdev->region_id = 0;
799 memdev->range_index = 2+1;
800 memdev->region_index = 0+1;
801 memdev->region_size = 0;
802 memdev->region_offset = 0;
804 memdev->interleave_index = 0;
805 memdev->interleave_ways = 1;
807 /* mem-region7 (spa/dcr1, dimm1) */
808 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 7;
809 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
810 memdev->header.length = sizeof(*memdev);
811 memdev->device_handle = handle[1];
812 memdev->physical_id = 1;
813 memdev->region_id = 0;
814 memdev->range_index = 3+1;
815 memdev->region_index = 1+1;
816 memdev->region_size = 0;
817 memdev->region_offset = 0;
819 memdev->interleave_index = 0;
820 memdev->interleave_ways = 1;
822 /* mem-region8 (spa/dcr2, dimm2) */
823 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 8;
824 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
825 memdev->header.length = sizeof(*memdev);
826 memdev->device_handle = handle[2];
827 memdev->physical_id = 2;
828 memdev->region_id = 0;
829 memdev->range_index = 4+1;
830 memdev->region_index = 2+1;
831 memdev->region_size = 0;
832 memdev->region_offset = 0;
834 memdev->interleave_index = 0;
835 memdev->interleave_ways = 1;
837 /* mem-region9 (spa/dcr3, dimm3) */
838 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 9;
839 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
840 memdev->header.length = sizeof(*memdev);
841 memdev->device_handle = handle[3];
842 memdev->physical_id = 3;
843 memdev->region_id = 0;
844 memdev->range_index = 5+1;
845 memdev->region_index = 3+1;
846 memdev->region_size = 0;
847 memdev->region_offset = 0;
849 memdev->interleave_index = 0;
850 memdev->interleave_ways = 1;
852 /* mem-region10 (spa/bdw0, dimm0) */
853 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 10;
854 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
855 memdev->header.length = sizeof(*memdev);
856 memdev->device_handle = handle[0];
857 memdev->physical_id = 0;
858 memdev->region_id = 0;
859 memdev->range_index = 6+1;
860 memdev->region_index = 0+1;
861 memdev->region_size = 0;
862 memdev->region_offset = 0;
864 memdev->interleave_index = 0;
865 memdev->interleave_ways = 1;
867 /* mem-region11 (spa/bdw1, dimm1) */
868 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 11;
869 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
870 memdev->header.length = sizeof(*memdev);
871 memdev->device_handle = handle[1];
872 memdev->physical_id = 1;
873 memdev->region_id = 0;
874 memdev->range_index = 7+1;
875 memdev->region_index = 1+1;
876 memdev->region_size = 0;
877 memdev->region_offset = 0;
879 memdev->interleave_index = 0;
880 memdev->interleave_ways = 1;
882 /* mem-region12 (spa/bdw2, dimm2) */
883 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 12;
884 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
885 memdev->header.length = sizeof(*memdev);
886 memdev->device_handle = handle[2];
887 memdev->physical_id = 2;
888 memdev->region_id = 0;
889 memdev->range_index = 8+1;
890 memdev->region_index = 2+1;
891 memdev->region_size = 0;
892 memdev->region_offset = 0;
894 memdev->interleave_index = 0;
895 memdev->interleave_ways = 1;
897 /* mem-region13 (spa/dcr3, dimm3) */
898 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 13;
899 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
900 memdev->header.length = sizeof(*memdev);
901 memdev->device_handle = handle[3];
902 memdev->physical_id = 3;
903 memdev->region_id = 0;
904 memdev->range_index = 9+1;
905 memdev->region_index = 3+1;
906 memdev->region_size = 0;
907 memdev->region_offset = 0;
909 memdev->interleave_index = 0;
910 memdev->interleave_ways = 1;
912 offset = offset + sizeof(struct acpi_nfit_memory_map) * 14;
913 /* dcr-descriptor0: blk */
914 dcr = nfit_buf + offset;
915 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
916 dcr->header.length = sizeof(struct acpi_nfit_control_region);
917 dcr->region_index = 0+1;
918 dcr->vendor_id = 0xabcd;
920 dcr->revision_id = 1;
921 dcr->serial_number = ~handle[0];
922 dcr->code = NFIT_FIC_BLK;
924 dcr->window_size = DCR_SIZE;
925 dcr->command_offset = 0;
926 dcr->command_size = 8;
927 dcr->status_offset = 8;
928 dcr->status_size = 4;
930 /* dcr-descriptor1: blk */
931 dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region);
932 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
933 dcr->header.length = sizeof(struct acpi_nfit_control_region);
934 dcr->region_index = 1+1;
935 dcr->vendor_id = 0xabcd;
937 dcr->revision_id = 1;
938 dcr->serial_number = ~handle[1];
939 dcr->code = NFIT_FIC_BLK;
941 dcr->window_size = DCR_SIZE;
942 dcr->command_offset = 0;
943 dcr->command_size = 8;
944 dcr->status_offset = 8;
945 dcr->status_size = 4;
947 /* dcr-descriptor2: blk */
948 dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 2;
949 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
950 dcr->header.length = sizeof(struct acpi_nfit_control_region);
951 dcr->region_index = 2+1;
952 dcr->vendor_id = 0xabcd;
954 dcr->revision_id = 1;
955 dcr->serial_number = ~handle[2];
956 dcr->code = NFIT_FIC_BLK;
958 dcr->window_size = DCR_SIZE;
959 dcr->command_offset = 0;
960 dcr->command_size = 8;
961 dcr->status_offset = 8;
962 dcr->status_size = 4;
964 /* dcr-descriptor3: blk */
965 dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 3;
966 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
967 dcr->header.length = sizeof(struct acpi_nfit_control_region);
968 dcr->region_index = 3+1;
969 dcr->vendor_id = 0xabcd;
971 dcr->revision_id = 1;
972 dcr->serial_number = ~handle[3];
973 dcr->code = NFIT_FIC_BLK;
975 dcr->window_size = DCR_SIZE;
976 dcr->command_offset = 0;
977 dcr->command_size = 8;
978 dcr->status_offset = 8;
979 dcr->status_size = 4;
981 offset = offset + sizeof(struct acpi_nfit_control_region) * 4;
982 /* dcr-descriptor0: pmem */
983 dcr = nfit_buf + offset;
984 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
985 dcr->header.length = offsetof(struct acpi_nfit_control_region,
987 dcr->region_index = 4+1;
988 dcr->vendor_id = 0xabcd;
990 dcr->revision_id = 1;
991 dcr->serial_number = ~handle[0];
992 dcr->code = NFIT_FIC_BYTEN;
995 /* dcr-descriptor1: pmem */
996 dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
998 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
999 dcr->header.length = offsetof(struct acpi_nfit_control_region,
1001 dcr->region_index = 5+1;
1002 dcr->vendor_id = 0xabcd;
1004 dcr->revision_id = 1;
1005 dcr->serial_number = ~handle[1];
1006 dcr->code = NFIT_FIC_BYTEN;
1009 /* dcr-descriptor2: pmem */
1010 dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
1012 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1013 dcr->header.length = offsetof(struct acpi_nfit_control_region,
1015 dcr->region_index = 6+1;
1016 dcr->vendor_id = 0xabcd;
1018 dcr->revision_id = 1;
1019 dcr->serial_number = ~handle[2];
1020 dcr->code = NFIT_FIC_BYTEN;
1023 /* dcr-descriptor3: pmem */
1024 dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
1026 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1027 dcr->header.length = offsetof(struct acpi_nfit_control_region,
1029 dcr->region_index = 7+1;
1030 dcr->vendor_id = 0xabcd;
1032 dcr->revision_id = 1;
1033 dcr->serial_number = ~handle[3];
1034 dcr->code = NFIT_FIC_BYTEN;
1037 offset = offset + offsetof(struct acpi_nfit_control_region,
1039 /* bdw0 (spa/dcr0, dimm0) */
1040 bdw = nfit_buf + offset;
1041 bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1042 bdw->header.length = sizeof(struct acpi_nfit_data_region);
1043 bdw->region_index = 0+1;
1046 bdw->size = BDW_SIZE;
1047 bdw->capacity = DIMM_SIZE;
1048 bdw->start_address = 0;
1050 /* bdw1 (spa/dcr1, dimm1) */
1051 bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region);
1052 bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1053 bdw->header.length = sizeof(struct acpi_nfit_data_region);
1054 bdw->region_index = 1+1;
1057 bdw->size = BDW_SIZE;
1058 bdw->capacity = DIMM_SIZE;
1059 bdw->start_address = 0;
1061 /* bdw2 (spa/dcr2, dimm2) */
1062 bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 2;
1063 bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1064 bdw->header.length = sizeof(struct acpi_nfit_data_region);
1065 bdw->region_index = 2+1;
1068 bdw->size = BDW_SIZE;
1069 bdw->capacity = DIMM_SIZE;
1070 bdw->start_address = 0;
1072 /* bdw3 (spa/dcr3, dimm3) */
1073 bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 3;
1074 bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1075 bdw->header.length = sizeof(struct acpi_nfit_data_region);
/*
 * Tail of nfit_test0_setup(): finish the bdw3 block-data-window entry,
 * emit one flush-hint-address table per dimm, optionally append the
 * hotplug (dimm4) tables, then seed ARS results and force-enable the
 * commands this test exercises.
 *
 * NOTE(review): the "+1" on every range/region index keeps the NFIT
 * indices 1-based; index 0 appears to be reserved/invalid -- confirm
 * against the ACPI NFIT structure definitions.
 */
1076 bdw->region_index = 3+1;
1079 bdw->size = BDW_SIZE;
1080 bdw->capacity = DIMM_SIZE;
1081 bdw->start_address = 0;
/* skip past the four data-region (bdw0..bdw3) entries just written */
1083 offset = offset + sizeof(struct acpi_nfit_data_region) * 4;
/*
 * One flush-hint-address table per dimm; each hint points at the
 * per-dimm flush page allocated into t->flush_dma[].
 */
1084 /* flush0 (dimm0) */
1085 flush = nfit_buf + offset;
1086 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1087 flush->header.length = sizeof(struct acpi_nfit_flush_address);
1088 flush->device_handle = handle[0];
1089 flush->hint_count = 1;
1090 flush->hint_address[0] = t->flush_dma[0];
1092 /* flush1 (dimm1) */
1093 flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 1;
1094 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1095 flush->header.length = sizeof(struct acpi_nfit_flush_address);
1096 flush->device_handle = handle[1];
1097 flush->hint_count = 1;
1098 flush->hint_address[0] = t->flush_dma[1];
1100 /* flush2 (dimm2) */
1101 flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 2;
1102 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1103 flush->header.length = sizeof(struct acpi_nfit_flush_address);
1104 flush->device_handle = handle[2];
1105 flush->hint_count = 1;
1106 flush->hint_address[0] = t->flush_dma[2];
1108 /* flush3 (dimm3) */
1109 flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 3;
1110 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1111 flush->header.length = sizeof(struct acpi_nfit_flush_address);
1112 flush->device_handle = handle[3];
1113 flush->hint_count = 1;
1114 flush->hint_address[0] = t->flush_dma[3];
/*
 * Hotplug topology (see "BUS0" diagram at the top of the file):
 * describe a fifth dimm (handle[4]) with its own control region,
 * block-data-window, SPA ranges 10..12 and memdev mappings 14..16.
 * Only emitted on the second setup pass, after setup_hotplug is set
 * by the probe path.
 */
1116 if (t->setup_hotplug) {
1117 offset = offset + sizeof(struct acpi_nfit_flush_address) * 4;
1118 /* dcr-descriptor4: blk */
1119 dcr = nfit_buf + offset;
1120 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1121 dcr->header.length = sizeof(struct acpi_nfit_control_region);
1122 dcr->region_index = 8+1;
1123 dcr->vendor_id = 0xabcd;
1125 dcr->revision_id = 1;
/* ~handle gives a unique, obviously-fake serial number per dimm */
1126 dcr->serial_number = ~handle[4];
1127 dcr->code = NFIT_FIC_BLK;
1129 dcr->window_size = DCR_SIZE;
1130 dcr->command_offset = 0;
1131 dcr->command_size = 8;
1132 dcr->status_offset = 8;
1133 dcr->status_size = 4;
1135 offset = offset + sizeof(struct acpi_nfit_control_region);
/*
 * pmem flavor of the dimm4 control region: truncated structure (no
 * block-window fields), hence the offsetof()-based header.length.
 */
1136 /* dcr-descriptor4: pmem */
1137 dcr = nfit_buf + offset;
1138 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1139 dcr->header.length = offsetof(struct acpi_nfit_control_region,
1141 dcr->region_index = 9+1;
1142 dcr->vendor_id = 0xabcd;
1144 dcr->revision_id = 1;
1145 dcr->serial_number = ~handle[4];
1146 dcr->code = NFIT_FIC_BYTEN;
1149 offset = offset + offsetof(struct acpi_nfit_control_region,
1151 /* bdw4 (spa/dcr4, dimm4) */
1152 bdw = nfit_buf + offset;
1153 bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1154 bdw->header.length = sizeof(struct acpi_nfit_data_region);
1155 bdw->region_index = 8+1;
1158 bdw->size = BDW_SIZE;
1159 bdw->capacity = DIMM_SIZE;
1160 bdw->start_address = 0;
1162 offset = offset + sizeof(struct acpi_nfit_data_region);
1163 /* spa10 (dcr4) dimm4 */
1164 spa = nfit_buf + offset;
1165 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1166 spa->header.length = sizeof(*spa);
1167 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
1168 spa->range_index = 10+1;
1169 spa->address = t->dcr_dma[4];
1170 spa->length = DCR_SIZE;
1173 * spa11 (single-dimm interleave for hotplug, note storage
1174 * does not actually alias the related block-data-window
1177 spa = nfit_buf + offset + sizeof(*spa);
1178 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1179 spa->header.length = sizeof(*spa);
1180 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
1181 spa->range_index = 11+1;
1182 spa->address = t->spa_set_dma[2];
1183 spa->length = SPA0_SIZE;
1185 /* spa12 (bdw for dcr4) dimm4 */
1186 spa = nfit_buf + offset + sizeof(*spa) * 2;
1187 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1188 spa->header.length = sizeof(*spa);
1189 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
1190 spa->range_index = 12+1;
1191 spa->address = t->dimm_dma[4];
1192 spa->length = DIMM_SIZE;
1194 offset = offset + sizeof(*spa) * 3;
/* memdevs tying dimm4 (handle[4]) into the three new SPA ranges */
1195 /* mem-region14 (spa/dcr4, dimm4) */
1196 memdev = nfit_buf + offset;
1197 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1198 memdev->header.length = sizeof(*memdev);
1199 memdev->device_handle = handle[4];
1200 memdev->physical_id = 4;
1201 memdev->region_id = 0;
1202 memdev->range_index = 10+1;
1203 memdev->region_index = 8+1;
1204 memdev->region_size = 0;
1205 memdev->region_offset = 0;
1206 memdev->address = 0;
1207 memdev->interleave_index = 0;
1208 memdev->interleave_ways = 1;
1210 /* mem-region15 (spa0, dimm4) */
1211 memdev = nfit_buf + offset +
1212 sizeof(struct acpi_nfit_memory_map);
1213 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1214 memdev->header.length = sizeof(*memdev);
1215 memdev->device_handle = handle[4];
1216 memdev->physical_id = 4;
1217 memdev->region_id = 0;
1218 memdev->range_index = 11+1;
1219 memdev->region_index = 9+1;
1220 memdev->region_size = SPA0_SIZE;
1221 memdev->region_offset = t->spa_set_dma[2];
1222 memdev->address = 0;
1223 memdev->interleave_index = 0;
1224 memdev->interleave_ways = 1;
1226 /* mem-region16 (spa/bdw4, dimm4) */
1227 memdev = nfit_buf + offset +
1228 sizeof(struct acpi_nfit_memory_map) * 2;
1229 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1230 memdev->header.length = sizeof(*memdev);
1231 memdev->device_handle = handle[4];
1232 memdev->physical_id = 4;
1233 memdev->region_id = 0;
1234 memdev->range_index = 12+1;
1235 memdev->region_index = 8+1;
1236 memdev->region_size = 0;
1237 memdev->region_offset = 0;
1238 memdev->address = 0;
1239 memdev->interleave_index = 0;
1240 memdev->interleave_ways = 1;
1242 offset = offset + sizeof(struct acpi_nfit_memory_map) * 3;
/* fifth flush-hint table, for the hotplug dimm (was mislabeled "flush3") */
1243 /* flush4 (dimm4) */
1244 flush = nfit_buf + offset;
1245 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1246 flush->header.length = sizeof(struct acpi_nfit_flush_address);
1247 flush->device_handle = handle[4];
1248 flush->hint_count = 1;
1249 flush->hint_address[0] = t->flush_dma[4];
/* pre-populate an address-range-scrub result covering spa0 */
1252 post_ars_status(&t->ars_state, t->spa_set_dma[0], SPA0_SIZE);
/*
 * Force-enable the DIMM config commands and the bus-level ARS/clear
 * commands so the test can drive them without firmware support.
 */
1254 acpi_desc = &t->acpi_desc;
1255 set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en);
1256 set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
1257 set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
1258 set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
1259 set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
1260 set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
1261 set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
/*
 * nfit_test1_setup() - build the minimal NFIT for the second test bus:
 * a single flat PMEM range (no blk aliasing), one memdev that reports
 * every failure/health flag, and a byte-addressable control region.
 * Exercises the driver's handling of a degraded, non-aliased dimm.
 */
1264 static void nfit_test1_setup(struct nfit_test *t)
1267 void *nfit_buf = t->nfit_buf;
1268 struct acpi_nfit_memory_map *memdev;
1269 struct acpi_nfit_control_region *dcr;
1270 struct acpi_nfit_system_address *spa;
1271 struct acpi_nfit_desc *acpi_desc;
1274 /* spa0 (flat range with no bdw aliasing) */
1275 spa = nfit_buf + offset;
1276 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1277 spa->header.length = sizeof(*spa);
1278 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
1279 spa->range_index = 0+1;
1280 spa->address = t->spa_set_dma[0];
1281 spa->length = SPA2_SIZE;
1283 offset += sizeof(*spa);
1284 /* mem-region0 (spa0, dimm0) */
1285 memdev = nfit_buf + offset;
1286 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1287 memdev->header.length = sizeof(*memdev);
1288 memdev->device_handle = 0;
1289 memdev->physical_id = 0;
1290 memdev->region_id = 0;
1291 memdev->range_index = 0+1;
1292 memdev->region_index = 0+1;
1293 memdev->region_size = SPA2_SIZE;
1294 memdev->region_offset = 0;
1295 memdev->address = 0;
1296 memdev->interleave_index = 0;
1297 memdev->interleave_ways = 1;
/*
 * Deliberately report every bad-state flag (save/restore/flush failed,
 * health event, not armed) so the unarmed/degraded paths get coverage.
 */
1298 memdev->flags = ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED
1299 | ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_HEALTH_OBSERVED
1300 | ACPI_NFIT_MEM_NOT_ARMED;
1302 offset += sizeof(*memdev);
/* truncated control region (no block-window fields), pmem-only dimm */
1303 /* dcr-descriptor0 */
1304 dcr = nfit_buf + offset;
1305 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1306 dcr->header.length = offsetof(struct acpi_nfit_control_region,
1308 dcr->region_index = 0+1;
1309 dcr->vendor_id = 0xabcd;
1311 dcr->revision_id = 1;
1312 dcr->serial_number = ~0;
1313 dcr->code = NFIT_FIC_BYTE;
/* seed ARS results and force-enable the bus scrub/clear commands */
1316 post_ars_status(&t->ars_state, t->spa_set_dma[0], SPA2_SIZE);
1318 acpi_desc = &t->acpi_desc;
1319 set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
1320 set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
1321 set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
1322 set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
/*
 * nfit_test_blk_do_io() - fake block-window I/O for the test bus.
 * Instead of programming a real block-data-window, just memcpy()
 * to/from the simulated dimm backing memory at @dpa, under a region
 * lane to mirror the locking of the real nfit blk path.
 */
1325 static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
1326 void *iobuf, u64 len, int rw)
1328 struct nfit_blk *nfit_blk = ndbr->blk_provider_data;
1329 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
1330 struct nd_region *nd_region = &ndbr->nd_region;
1333 lane = nd_region_acquire_lane(nd_region);
/* write path: copy caller's buffer into the simulated window */
1335 memcpy(mmio->addr.base + dpa, iobuf, len);
/* read path: copy out of the simulated window */
1337 memcpy(iobuf, mmio->addr.base + dpa, len);
1339 /* give us some coverage of the mmio_flush_range() API */
1340 mmio_flush_range(mmio->addr.base + dpa, len);
1342 nd_region_release_lane(nd_region, lane);
/*
 * nfit_test_probe() - platform-device probe for one simulated NFIT bus.
 * Allocates the per-dimm and per-pmem bookkeeping arrays, runs the
 * instance's alloc/setup callbacks to build the NFIT, registers an
 * nvdimm bus, parses the table, and for test0 re-runs setup with
 * setup_hotplug set to simulate a dimm hot-add via a second
 * acpi_nfit_init() pass.
 */
1347 static int nfit_test_probe(struct platform_device *pdev)
1349 struct nvdimm_bus_descriptor *nd_desc;
1350 struct acpi_nfit_desc *acpi_desc;
1351 struct device *dev = &pdev->dev;
1352 struct nfit_test *nfit_test;
1355 nfit_test = to_nfit_test(&pdev->dev);
/* per-dimm pointer/dma arrays, sized by the number of control regions */
1358 if (nfit_test->num_dcr) {
1359 int num = nfit_test->num_dcr;
1361 nfit_test->dimm = devm_kcalloc(dev, num, sizeof(void *),
1363 nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
1365 nfit_test->flush = devm_kcalloc(dev, num, sizeof(void *),
1367 nfit_test->flush_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
1369 nfit_test->label = devm_kcalloc(dev, num, sizeof(void *),
1371 nfit_test->label_dma = devm_kcalloc(dev, num,
1372 sizeof(dma_addr_t), GFP_KERNEL);
1373 nfit_test->dcr = devm_kcalloc(dev, num,
1374 sizeof(struct nfit_test_dcr *), GFP_KERNEL);
1375 nfit_test->dcr_dma = devm_kcalloc(dev, num,
1376 sizeof(dma_addr_t), GFP_KERNEL);
/* all-or-nothing check; devm_ frees everything on probe failure */
1377 if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label
1378 && nfit_test->label_dma && nfit_test->dcr
1379 && nfit_test->dcr_dma && nfit_test->flush
1380 && nfit_test->flush_dma)
/* per-pmem-interleave-set arrays */
1386 if (nfit_test->num_pm) {
1387 int num = nfit_test->num_pm;
1389 nfit_test->spa_set = devm_kcalloc(dev, num, sizeof(void *),
1391 nfit_test->spa_set_dma = devm_kcalloc(dev, num,
1392 sizeof(dma_addr_t), GFP_KERNEL);
1393 if (nfit_test->spa_set && nfit_test->spa_set_dma)
1399 /* per-nfit specific alloc */
1400 if (nfit_test->alloc(nfit_test))
/* build the NFIT tables, then hand them to the real nfit parser */
1403 nfit_test->setup(nfit_test);
1404 acpi_desc = &nfit_test->acpi_desc;
1405 acpi_nfit_desc_init(acpi_desc, &pdev->dev);
1406 acpi_desc->nfit = nfit_test->nfit_buf;
/* route blk reads/writes through the memcpy-based test shim */
1407 acpi_desc->blk_do_io = nfit_test_blk_do_io;
1408 nd_desc = &acpi_desc->nd_desc;
1409 nd_desc->provider_name = NULL;
1410 nd_desc->ndctl = nfit_test_ctl;
1411 acpi_desc->nvdimm_bus = nvdimm_bus_register(&pdev->dev, nd_desc);
1412 if (!acpi_desc->nvdimm_bus)
1415 rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_size);
/* tear the bus down if the initial table parse fails */
1417 nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
/* only the test0 topology has a hotplug second act */
1421 if (nfit_test->setup != nfit_test0_setup)
/* rebuild the NFIT with the dimm4 entries and re-parse: hot add */
1424 nfit_test->setup_hotplug = 1;
1425 nfit_test->setup(nfit_test);
1427 rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_size);
1429 nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
/*
 * nfit_test_remove() - platform-device remove; unregister the nvdimm
 * bus created at probe time (devm handles the array allocations).
 */
1436 static int nfit_test_remove(struct platform_device *pdev)
1438 struct nfit_test *nfit_test = to_nfit_test(&pdev->dev);
1439 struct acpi_nfit_desc *acpi_desc = &nfit_test->acpi_desc;
1441 nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
/*
 * nfit_test_release() - device release callback; frees the nfit_test
 * instance kzalloc'd in nfit_test_init() once its refcount drops.
 */
1446 static void nfit_test_release(struct device *dev)
1448 struct nfit_test *nfit_test = to_nfit_test(dev);
/* device-id table matching the platform devices registered in init */
1453 static const struct platform_device_id nfit_test_id[] = {
/* platform driver binding the simulated NFIT bus devices */
1458 static struct platform_driver nfit_test_driver = {
1459 .probe = nfit_test_probe,
1460 .remove = nfit_test_remove,
1462 .name = KBUILD_MODNAME,
1464 .id_table = nfit_test_id,
/*
 * Mirror the kernel's CMA size config so init can warn when the
 * contiguous-memory area is too small; 0 when CMA is not configured.
 */
1467 #ifdef CONFIG_CMA_SIZE_MBYTES
1468 #define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
1470 #define CMA_SIZE_MBYTES 0
/*
 * nfit_test_init() - module entry point. Installs the test lookup
 * hook, then creates NUM_NFITS platform devices: instance 0 gets the
 * full interleaved/aliased test0 topology, later instances get the
 * minimal test1 topology. Verifies 128M of coherent (CMA) memory is
 * actually allocatable before registering the driver; on any failure,
 * unregisters the devices created so far and tears down the hook.
 */
1473 static __init int nfit_test_init(void)
1477 nfit_test_setup(nfit_test_lookup);
1479 for (i = 0; i < NUM_NFITS; i++) {
1480 struct nfit_test *nfit_test;
1481 struct platform_device *pdev;
1484 nfit_test = kzalloc(sizeof(*nfit_test), GFP_KERNEL);
1489 INIT_LIST_HEAD(&nfit_test->resources);
/* instance 0: full test0 topology (interleave + blk aliasing) */
1492 nfit_test->num_pm = NUM_PM;
1493 nfit_test->num_dcr = NUM_DCR;
1494 nfit_test->alloc = nfit_test0_alloc;
1495 nfit_test->setup = nfit_test0_setup;
/* other instances: single flat pmem range, test1 topology */
1498 nfit_test->num_pm = 1;
1499 nfit_test->alloc = nfit_test1_alloc;
1500 nfit_test->setup = nfit_test1_setup;
1506 pdev = &nfit_test->pdev;
1507 pdev->name = KBUILD_MODNAME;
1509 pdev->dev.release = nfit_test_release;
1510 rc = platform_device_register(pdev);
/* registration failed: drop the embedded device's reference */
1512 put_device(&pdev->dev);
1516 rc = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1520 instances[i] = nfit_test;
/* probe allocation: confirm 128M of coherent memory is available */
1526 buf = dma_alloc_coherent(&pdev->dev, SZ_128M, &dma,
1530 dev_warn(&pdev->dev, "need 128M of free cma\n");
1533 dma_free_coherent(&pdev->dev, SZ_128M, buf, dma);
1537 rc = platform_driver_register(&nfit_test_driver);
/* error path: unwind every device registered above */
1543 for (i = 0; i < NUM_NFITS; i++)
1545 platform_device_unregister(&instances[i]->pdev);
1546 nfit_test_teardown();
/*
 * nfit_test_exit() - module teardown: unregister the driver and all
 * instance devices, then remove the test lookup hook.
 */
1550 static __exit void nfit_test_exit(void)
1554 platform_driver_unregister(&nfit_test_driver);
1555 for (i = 0; i < NUM_NFITS; i++)
1556 platform_device_unregister(&instances[i]->pdev);
1557 nfit_test_teardown();
/* standard module entry/exit and metadata */
1560 module_init(nfit_test_init);
1561 module_exit(nfit_test_exit);
1562 MODULE_LICENSE("GPL v2");
1563 MODULE_AUTHOR("Intel Corporation");