/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *   PCIe NTB Perf Linux driver
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/sizes.h>
#include <linux/ntb.h>
#include <linux/mutex.h>
#define DRIVER_NAME		"ntb_perf"
#define DRIVER_DESCRIPTION	"PCIe NTB Performance Measurement Tool"

#define DRIVER_LICENSE		"Dual BSD/GPL"
#define DRIVER_VERSION		"1.0"
#define DRIVER_AUTHOR		"Dave Jiang <dave.jiang@intel.com>"

#define PERF_LINK_DOWN_TIMEOUT	10	/* ms */
#define PERF_VERSION		0xffff0001
#define MAX_THREADS		32
#define MAX_TEST_SIZE		SZ_1M
#define MAX_SRCS		32
#define DMA_OUT_RESOURCE_TO	msecs_to_jiffies(50)
#define DMA_RETRIES		20
#define SZ_4G			(1ULL << 32)
#define MAX_SEG_ORDER		20	/* no larger than 1M for kmalloc buffer */
MODULE_LICENSE(DRIVER_LICENSE);
MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
static struct dentry *perf_debugfs_dir;

static unsigned long max_mw_size;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");

static unsigned int seg_order = 19; /* 512K */
module_param(seg_order, uint, 0644);
MODULE_PARM_DESC(seg_order, "size order [2^n] of buffer segment for testing");

static unsigned int run_order = 32; /* 4G */
module_param(run_order, uint, 0644);
MODULE_PARM_DESC(run_order, "size order [2^n] of total data to transfer");

static bool use_dma; /* default to false */
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Use the DMA engine to measure performance");
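
/*
 * Typical usage from userspace (a sketch; the debugfs mount point and the
 * PCI device name below are examples, not fixed values):
 *
 *	modprobe ntb_perf use_dma=0 seg_order=19 run_order=32
 *	echo 8 > /sys/kernel/debug/ntb_perf/0000:00:03.0/threads
 *	echo 1 > /sys/kernel/debug/ntb_perf/0000:00:03.0/run
 *	cat /sys/kernel/debug/ntb_perf/0000:00:03.0/run
 *
 * Writing "run" blocks until all threads finish; reading it back reports
 * per-thread throughput, or "running" while a measurement is in flight.
 */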
struct perf_mw {
	phys_addr_t	phys_addr;
	resource_size_t	phys_size;
	resource_size_t	xlat_align;
	resource_size_t	xlat_align_size;
	void __iomem	*vbase;
	size_t		xlat_size;
	size_t		buf_size;
	void		*virt_addr;
	dma_addr_t	dma_addr;
};

struct perf_ctx;
struct pthr_ctx {
	struct task_struct	*thread;
	struct perf_ctx		*perf;
	atomic_t		dma_sync;
	struct dma_chan		*dma_chan;
	int			dma_prep_err;
	int			src_idx;
	void			*srcs[MAX_SRCS];
	wait_queue_head_t	*wq;
	int			status;
	u64			copied;
	u64			diff_us;
};
struct perf_ctx {
	struct ntb_dev		*ntb;
	spinlock_t		db_lock;
	struct perf_mw		mw;
	bool			link_is_up;
	struct work_struct	link_cleanup;
	struct delayed_work	link_work;
	wait_queue_head_t	link_wq;
	struct dentry		*debugfs_node_dir;
	struct dentry		*debugfs_run;
	struct dentry		*debugfs_threads;
	u8			perf_threads;
	/* mutex ensures only one set of threads run at once */
	struct mutex		run_mutex;
	struct pthr_ctx		pthr_ctx[MAX_THREADS];
	atomic_t		tsync;
	atomic_t		tdone;
};

/* Scratchpad registers used for the link handshake with the peer */
enum {
	VERSION = 0,
	MW_SZ_HIGH,
	MW_SZ_LOW,
	MAX_SPAD
};
static void perf_link_event(void *ctx)
{
	struct perf_ctx *perf = ctx;

	if (ntb_link_is_up(perf->ntb, NULL, NULL) == 1)
		schedule_delayed_work(&perf->link_work, 2*HZ);
	else
		schedule_work(&perf->link_cleanup);
}
static void perf_db_event(void *ctx, int vec)
{
	struct perf_ctx *perf = ctx;
	u64 db_bits, db_mask;

	db_mask = ntb_db_vector_mask(perf->ntb, vec);
	db_bits = ntb_db_read(perf->ntb);

	dev_dbg(&perf->ntb->dev, "doorbell vec %d mask %#llx bits %#llx\n",
		vec, db_mask, db_bits);
}

static const struct ntb_ctx_ops perf_ops = {
	.link_event = perf_link_event,
	.db_event = perf_db_event,
};
static void perf_copy_callback(void *data)
{
	struct pthr_ctx *pctx = data;

	atomic_dec(&pctx->dma_sync);
}
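
/*
 * Copy one buffer segment into the peer's memory window, either with the
 * CPU (memcpy_toio) or by queueing a memcpy descriptor on the DMA engine.
 * Returns the number of bytes handed off, or a negative errno.
 */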
static ssize_t perf_copy(struct pthr_ctx *pctx, char __iomem *dst,
			 char *src, size_t size)
{
	struct perf_ctx *perf = pctx->perf;
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan = pctx->dma_chan;
	struct dma_device *device;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;
	size_t src_off, dst_off;
	struct perf_mw *mw = &perf->mw;
	void __iomem *vbase;
	void __iomem *dst_vaddr;
	dma_addr_t dst_phys;
	int retries = 0;

	if (!use_dma) {
		memcpy_toio(dst, src, size);
		return size;
	}

	if (!chan) {
		dev_err(&perf->ntb->dev, "DMA engine does not exist\n");
		return -EINVAL;
	}

	device = chan->device;
	src_off = (uintptr_t)src & ~PAGE_MASK;
	dst_off = (uintptr_t __force)dst & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, src_off, dst_off, size))
		return -ENODEV;

	vbase = mw->vbase;
	dst_vaddr = dst;
	dst_phys = mw->phys_addr + (dst_vaddr - vbase);

	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
	if (!unmap)
		return -ENOMEM;

	unmap->len = size;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(src),
				      src_off, size, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	do {
		txd = device->device_prep_dma_memcpy(chan, dst_phys,
						     unmap->addr[0],
						     size, DMA_PREP_INTERRUPT);
		if (!txd) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(DMA_OUT_RESOURCE_TO);
		}
	} while (!txd && (++retries < DMA_RETRIES));

	if (!txd) {
		pctx->dma_prep_err++;
		goto err_get_unmap;
	}

	txd->callback = perf_copy_callback;
	txd->callback_param = pctx;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	atomic_inc(&pctx->dma_sync);
	dma_async_issue_pending(chan);

	return size;

err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
	return 0;
}
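
/*
 * Stream 'total' bytes through the memory window in 'buf_size' chunks,
 * wrapping back to the start of the window whenever it fills up, and
 * record the elapsed time so the caller can derive throughput.
 */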
static int perf_move_data(struct pthr_ctx *pctx, char __iomem *dst, char *src,
			  u64 buf_size, u64 win_size, u64 total)
{
	int chunks, total_chunks, i;
	int copied_chunks = 0;
	u64 copied = 0, result;
	char __iomem *tmp = dst;
	u64 perf, diff_us;
	ktime_t kstart, kstop, kdiff;
	unsigned long last_sleep = jiffies;

	chunks = div64_u64(win_size, buf_size);
	total_chunks = div64_u64(total, buf_size);
	kstart = ktime_get();

	for (i = 0; i < total_chunks; i++) {
		result = perf_copy(pctx, tmp, src, buf_size);
		copied += result;
		copied_chunks++;
		if (copied_chunks == chunks) {
			tmp = dst;
			copied_chunks = 0;
		} else
			tmp += buf_size;

		/* Yield every 5s to prevent soft lockups */
		if (unlikely((jiffies - last_sleep) > 5 * HZ)) {
			last_sleep = jiffies;
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(1);
		}

		if (unlikely(kthread_should_stop()))
			break;
	}

	if (use_dma) {
		pr_debug("%s: All DMA descriptors submitted\n", current->comm);
		while (atomic_read(&pctx->dma_sync) != 0) {
			if (kthread_should_stop())
				break;
			msleep(20);
		}
	}

	kstop = ktime_get();
	kdiff = ktime_sub(kstop, kstart);
	diff_us = ktime_to_us(kdiff);

	pr_debug("%s: copied %llu bytes\n", current->comm, copied);
	pr_debug("%s: lasted %llu usecs\n", current->comm, diff_us);

	perf = div64_u64(copied, diff_us);

	pr_debug("%s: MBytes/s: %llu\n", current->comm, perf);

	pctx->copied = copied;
	pctx->diff_us = diff_us;

	return 0;
}
static bool perf_dma_filter_fn(struct dma_chan *chan, void *node)
{
	return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
}
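
/*
 * Per-thread worker: optionally grabs a NUMA-node-local DMA channel,
 * allocates a set of source buffers, synchronizes with its sibling
 * threads, runs one perf_move_data() pass, then parks until
 * kthread_stop() collects its status.
 */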
static int ntb_perf_thread(void *data)
{
	struct pthr_ctx *pctx = data;
	struct perf_ctx *perf = pctx->perf;
	struct pci_dev *pdev = perf->ntb->pdev;
	struct perf_mw *mw = &perf->mw;
	char __iomem *dst;
	u64 win_size, buf_size, total;
	void *src;
	int rc, node, i;
	struct dma_chan *dma_chan = NULL;

	pr_debug("kthread %s starting...\n", current->comm);

	node = dev_to_node(&pdev->dev);

	if (use_dma && !pctx->dma_chan) {
		dma_cap_mask_t dma_mask;

		dma_cap_zero(dma_mask);
		dma_cap_set(DMA_MEMCPY, dma_mask);
		dma_chan = dma_request_channel(dma_mask, perf_dma_filter_fn,
					       (void *)(unsigned long)node);
		if (!dma_chan) {
			pr_warn("%s: cannot acquire DMA channel, quitting\n",
				current->comm);
			return -ENODEV;
		}
		pctx->dma_chan = dma_chan;
	}

	for (i = 0; i < MAX_SRCS; i++) {
		pctx->srcs[i] = kmalloc_node(MAX_TEST_SIZE, GFP_KERNEL, node);
		if (!pctx->srcs[i]) {
			rc = -ENOMEM;
			goto err;
		}
	}

	win_size = mw->phys_size;
	buf_size = 1ULL << seg_order;
	total = 1ULL << run_order;

	if (buf_size > MAX_TEST_SIZE)
		buf_size = MAX_TEST_SIZE;

	dst = (char __iomem *)mw->vbase;

	atomic_inc(&perf->tsync);
	while (atomic_read(&perf->tsync) != perf->perf_threads)
		schedule();

	src = pctx->srcs[pctx->src_idx];
	pctx->src_idx = (pctx->src_idx + 1) & (MAX_SRCS - 1);

	rc = perf_move_data(pctx, dst, src, buf_size, win_size, total);

	atomic_dec(&perf->tsync);

	if (rc < 0) {
		pr_err("%s: failed\n", current->comm);
		goto err;
	}

	for (i = 0; i < MAX_SRCS; i++) {
		kfree(pctx->srcs[i]);
		pctx->srcs[i] = NULL;
	}

	atomic_inc(&perf->tdone);
	wake_up(pctx->wq);
	rc = 0;
	goto done;

err:
	for (i = 0; i < MAX_SRCS; i++) {
		kfree(pctx->srcs[i]);
		pctx->srcs[i] = NULL;
	}

	if (dma_chan) {
		dma_release_channel(dma_chan);
		pctx->dma_chan = NULL;
	}

done:
	/* Wait until we are told to stop */
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	return rc;
}
static void perf_free_mw(struct perf_ctx *perf)
{
	struct perf_mw *mw = &perf->mw;
	struct pci_dev *pdev = perf->ntb->pdev;

	if (!mw->virt_addr)
		return;

	ntb_mw_clear_trans(perf->ntb, 0);
	dma_free_coherent(&pdev->dev, mw->buf_size,
			  mw->virt_addr, mw->dma_addr);
	mw->xlat_size = 0;
	mw->buf_size = 0;
	mw->virt_addr = NULL;
}
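
/*
 * (Re)allocate the local buffer backing memory window 0, rounding the
 * requested size up to the hardware's translation alignment, and program
 * the window translation to point at it.
 */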
static int perf_set_mw(struct perf_ctx *perf, resource_size_t size)
{
	struct perf_mw *mw = &perf->mw;
	size_t xlat_size, buf_size;
	int rc;

	if (!size)
		return -EINVAL;

	xlat_size = round_up(size, mw->xlat_align_size);
	buf_size = round_up(size, mw->xlat_align);

	if (mw->xlat_size == xlat_size)
		return 0;

	if (mw->buf_size)
		perf_free_mw(perf);

	mw->xlat_size = xlat_size;
	mw->buf_size = buf_size;

	mw->virt_addr = dma_alloc_coherent(&perf->ntb->pdev->dev, buf_size,
					   &mw->dma_addr, GFP_KERNEL);
	if (!mw->virt_addr) {
		mw->xlat_size = 0;
		mw->buf_size = 0;
		return -ENOMEM;
	}

	rc = ntb_mw_set_trans(perf->ntb, 0, mw->dma_addr, mw->xlat_size);
	if (rc) {
		dev_err(&perf->ntb->dev, "Unable to set mw0 translation\n");
		perf_free_mw(perf);
		return -EIO;
	}

	return 0;
}
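
/*
 * Link-up handshake, run from a delayed work item: advertise our memory
 * window size and protocol version through the peer's scratchpads, then
 * read back what the peer advertised and size our window to match. The
 * scratchpad layout is the VERSION/MW_SZ_HIGH/MW_SZ_LOW enum above.
 */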
static void perf_link_work(struct work_struct *work)
{
	struct perf_ctx *perf =
		container_of(work, struct perf_ctx, link_work.work);
	struct ntb_dev *ndev = perf->ntb;
	struct pci_dev *pdev = ndev->pdev;
	u32 val;
	u64 size;
	int rc;

	dev_dbg(&perf->ntb->pdev->dev, "%s called\n", __func__);

	size = perf->mw.phys_size;

	if (max_mw_size && size > max_mw_size)
		size = max_mw_size;

	ntb_peer_spad_write(ndev, MW_SZ_HIGH, upper_32_bits(size));
	ntb_peer_spad_write(ndev, MW_SZ_LOW, lower_32_bits(size));
	ntb_peer_spad_write(ndev, VERSION, PERF_VERSION);

	/* now read what peer wrote */
	val = ntb_spad_read(ndev, VERSION);
	if (val != PERF_VERSION) {
		dev_dbg(&pdev->dev, "Remote version = %#x\n", val);
		goto out;
	}

	val = ntb_spad_read(ndev, MW_SZ_HIGH);
	size = (u64)val << 32;

	val = ntb_spad_read(ndev, MW_SZ_LOW);
	size |= val;

	dev_dbg(&pdev->dev, "Remote MW size = %#llx\n", size);

	rc = perf_set_mw(perf, size);
	if (rc)
		goto out1;

	perf->link_is_up = true;
	wake_up(&perf->link_wq);

	return;

out1:
	perf_free_mw(perf);

out:
	if (ntb_link_is_up(ndev, NULL, NULL) == 1)
		schedule_delayed_work(&perf->link_work,
				      msecs_to_jiffies(PERF_LINK_DOWN_TIMEOUT));
}
static void perf_link_cleanup(struct work_struct *work)
{
	struct perf_ctx *perf = container_of(work,
					     struct perf_ctx,
					     link_cleanup);

	dev_dbg(&perf->ntb->pdev->dev, "%s called\n", __func__);

	if (!perf->link_is_up)
		cancel_delayed_work_sync(&perf->link_work);
}
static int perf_setup_mw(struct ntb_dev *ntb, struct perf_ctx *perf)
{
	struct perf_mw *mw;
	int rc;

	mw = &perf->mw;

	rc = ntb_mw_get_range(ntb, 0, &mw->phys_addr, &mw->phys_size,
			      &mw->xlat_align, &mw->xlat_align_size);
	if (rc)
		return rc;

	perf->mw.vbase = ioremap_wc(mw->phys_addr, mw->phys_size);
	if (!mw->vbase)
		return -ENOMEM;

	return 0;
}
static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf,
				size_t count, loff_t *offp)
{
	struct perf_ctx *perf = filp->private_data;
	char *buf;
	ssize_t ret, out_off = 0;
	struct pthr_ctx *pctx;
	int i;
	u64 rate;

	if (!perf)
		return 0;

	buf = kmalloc(1024, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (mutex_is_locked(&perf->run_mutex)) {
		out_off = snprintf(buf, 64, "running\n");
		goto read_from_buf;
	}

	for (i = 0; i < MAX_THREADS; i++) {
		pctx = &perf->pthr_ctx[i];

		if (pctx->status == -ENODATA)
			break;

		if (pctx->status) {
			out_off += snprintf(buf + out_off, 1024 - out_off,
					    "%d: error %d\n", i, pctx->status);
			continue;
		}

		rate = div64_u64(pctx->copied, pctx->diff_us);
		out_off += snprintf(buf + out_off, 1024 - out_off,
			"%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n",
			i, pctx->copied, pctx->diff_us, rate);
	}

read_from_buf:
	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_off);
	kfree(buf);

	return ret;
}
static void threads_cleanup(struct perf_ctx *perf)
{
	struct pthr_ctx *pctx;
	int i;

	for (i = 0; i < MAX_THREADS; i++) {
		pctx = &perf->pthr_ctx[i];
		if (pctx->thread) {
			pctx->status = kthread_stop(pctx->thread);
			pctx->thread = NULL;
		}
	}
}
static void perf_clear_thread_status(struct perf_ctx *perf)
{
	int i;

	for (i = 0; i < MAX_THREADS; i++)
		perf->pthr_ctx[i].status = -ENODATA;
}
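
/*
 * Kick off a measurement run: wait for the link handshake to finish,
 * clamp the thread count and transfer orders to sane values, spawn one
 * kthread per requested thread on the device's NUMA node, then block
 * until they all report completion via perf->tdone.
 */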
static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
				 size_t count, loff_t *offp)
{
	struct perf_ctx *perf = filp->private_data;
	int node, i;
	DECLARE_WAIT_QUEUE_HEAD(wq);

	if (wait_event_interruptible(perf->link_wq, perf->link_is_up))
		return -ENOLINK;

	if (perf->perf_threads == 0)
		return -EINVAL;

	if (!mutex_trylock(&perf->run_mutex))
		return -EBUSY;

	perf_clear_thread_status(perf);

	if (perf->perf_threads > MAX_THREADS) {
		perf->perf_threads = MAX_THREADS;
		pr_info("Reset total threads to: %u\n", MAX_THREADS);
	}

	/* no greater than 1M */
	if (seg_order > MAX_SEG_ORDER) {
		seg_order = MAX_SEG_ORDER;
		pr_info("Fix seg_order to %u\n", seg_order);
	}

	if (run_order < seg_order) {
		run_order = seg_order;
		pr_info("Fix run_order to %u\n", run_order);
	}

	node = dev_to_node(&perf->ntb->pdev->dev);
	atomic_set(&perf->tdone, 0);

	/* launch kernel thread */
	for (i = 0; i < perf->perf_threads; i++) {
		struct pthr_ctx *pctx;

		pctx = &perf->pthr_ctx[i];
		atomic_set(&pctx->dma_sync, 0);
		pctx->perf = perf;
		pctx->wq = &wq;
		pctx->thread =
			kthread_create_on_node(ntb_perf_thread,
					       (void *)pctx,
					       node, "ntb_perf %d", i);
		if (IS_ERR(pctx->thread)) {
			pctx->thread = NULL;
			goto err;
		}

		wake_up_process(pctx->thread);
	}

	wait_event_interruptible(wq,
		atomic_read(&perf->tdone) == perf->perf_threads);

	threads_cleanup(perf);
	mutex_unlock(&perf->run_mutex);
	return count;

err:
	threads_cleanup(perf);
	mutex_unlock(&perf->run_mutex);
	return -ENXIO;
}
static const struct file_operations ntb_perf_debugfs_run = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = debugfs_run_read,
	.write = debugfs_run_write,
};
static int perf_debugfs_setup(struct perf_ctx *perf)
{
	struct pci_dev *pdev = perf->ntb->pdev;

	if (!debugfs_initialized())
		return -ENODEV;

	if (!perf_debugfs_dir) {
		perf_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
		if (!perf_debugfs_dir)
			return -ENODEV;
	}

	perf->debugfs_node_dir = debugfs_create_dir(pci_name(pdev),
						    perf_debugfs_dir);
	if (!perf->debugfs_node_dir)
		return -ENODEV;

	perf->debugfs_run = debugfs_create_file("run", S_IRUSR | S_IWUSR,
						perf->debugfs_node_dir, perf,
						&ntb_perf_debugfs_run);
	if (!perf->debugfs_run)
		return -ENODEV;

	perf->debugfs_threads = debugfs_create_u8("threads", S_IRUSR | S_IWUSR,
						  perf->debugfs_node_dir,
						  &perf->perf_threads);
	if (!perf->debugfs_threads)
		return -ENODEV;

	return 0;
}
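
/*
 * Device probe: verify the hardware exposes enough scratchpads for the
 * handshake, allocate the per-device context on the local NUMA node, map
 * memory window 0, register the NTB context ops, and bring the link up.
 */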
static int perf_probe(struct ntb_client *client, struct ntb_dev *ntb)
{
	struct pci_dev *pdev = ntb->pdev;
	struct perf_ctx *perf;
	int node;
	int rc = 0;

	if (ntb_spad_count(ntb) < MAX_SPAD) {
		dev_err(&ntb->dev, "Not enough scratch pad registers for %s",
			DRIVER_NAME);
		return -EIO;
	}

	node = dev_to_node(&pdev->dev);

	perf = kzalloc_node(sizeof(*perf), GFP_KERNEL, node);
	if (!perf) {
		rc = -ENOMEM;
		goto err_perf;
	}

	perf->ntb = ntb;
	perf->perf_threads = 1;
	atomic_set(&perf->tsync, 0);
	mutex_init(&perf->run_mutex);
	spin_lock_init(&perf->db_lock);
	perf_setup_mw(ntb, perf);
	init_waitqueue_head(&perf->link_wq);
	INIT_DELAYED_WORK(&perf->link_work, perf_link_work);
	INIT_WORK(&perf->link_cleanup, perf_link_cleanup);

	rc = ntb_set_ctx(ntb, perf, &perf_ops);
	if (rc)
		goto err_ctx;

	perf->link_is_up = false;
	ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	ntb_link_event(ntb);

	rc = perf_debugfs_setup(perf);
	if (rc)
		goto err_ctx;

	perf_clear_thread_status(perf);

	return 0;

err_ctx:
	cancel_delayed_work_sync(&perf->link_work);
	cancel_work_sync(&perf->link_cleanup);
	kfree(perf);
err_perf:
	return rc;
}
static void perf_remove(struct ntb_client *client, struct ntb_dev *ntb)
{
	struct perf_ctx *perf = ntb->ctx;
	int i;

	dev_dbg(&perf->ntb->dev, "%s called\n", __func__);

	mutex_lock(&perf->run_mutex);

	cancel_delayed_work_sync(&perf->link_work);
	cancel_work_sync(&perf->link_cleanup);

	ntb_clear_ctx(ntb);
	ntb_link_disable(ntb);

	debugfs_remove_recursive(perf_debugfs_dir);
	perf_debugfs_dir = NULL;

	if (use_dma) {
		for (i = 0; i < MAX_THREADS; i++) {
			struct pthr_ctx *pctx = &perf->pthr_ctx[i];

			if (pctx->dma_chan)
				dma_release_channel(pctx->dma_chan);
		}
	}

	kfree(perf);
}
static struct ntb_client perf_client = {
	.ops = {
		.probe = perf_probe,
		.remove = perf_remove,
	},
};
module_ntb_client(perf_client);