 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Intel PCIe NTB Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/ntb.h>
#define NTB_TRANSPORT_VERSION	3

static unsigned int transport_mtu = 0x401E;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");
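
/*
 * Note on copy_bytes: payloads shorter than this threshold are copied by
 * the CPU even when a DMA channel is available; e.g. with the default of
 * 1024, a 512-byte packet takes the memcpy path while a 4 KB packet is
 * offloaded to the DMA engine (see ntb_async_rx() and ntb_async_tx()
 * below).
 */
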
struct ntb_queue_entry {
	/* ntb_queue list reference */
	struct list_head entry;
	/* pointers to data to be transferred */
	void *cb_data;
	void *buf;
	unsigned int len;
	unsigned int flags;
	unsigned int index;
	struct ntb_transport_qp *qp;

	struct ntb_payload_header __iomem *tx_hdr;
	struct ntb_payload_header *rx_hdr;
};

struct ntb_transport_qp {
	struct ntb_transport *transport;
	struct ntb_device *ndev;
	void *cb_data;
	struct dma_chan *dma_chan;

	u8 qp_num;	/* Only 64 QP's are allowed.  0-63 */

	struct ntb_rx_info __iomem *rx_info;
	struct ntb_rx_info *remote_rx_info;

	void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
			    void *data, int len);
	struct list_head tx_free_q;
	spinlock_t ntb_tx_free_q_lock;

	dma_addr_t tx_mw_phys;
	unsigned int tx_index;
	unsigned int tx_max_entry;
	unsigned int tx_max_frame;

	void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
			    void *data, int len);
	struct tasklet_struct rx_work;
	struct list_head rx_pend_q;
	struct list_head rx_free_q;
	spinlock_t ntb_rx_pend_q_lock;
	spinlock_t ntb_rx_free_q_lock;

	unsigned int rx_index;
	unsigned int rx_max_entry;
	unsigned int rx_max_frame;
	dma_cookie_t last_cookie;

	void (*event_handler) (void *data, int status);
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_stats;
struct ntb_transport_mw {
	size_t size;
	void *virt_addr;
	dma_addr_t dma_addr;
};

struct ntb_transport_client_dev {
	struct list_head entry;
	struct device dev;
};

struct ntb_transport {
	struct list_head entry;
	struct list_head client_devs;

	struct ntb_device *ndev;
	struct ntb_transport_mw *mw;
	struct ntb_transport_qp *qps;
	unsigned int max_qps;
	unsigned long qp_bitmap;

	struct delayed_work link_work;
	struct work_struct link_cleanup;
};

enum {
	DESC_DONE_FLAG = 1 << 0,
	LINK_DOWN_FLAG = 1 << 1,
};

struct ntb_payload_header {
	unsigned int ver;
	unsigned int len;
	unsigned int flags;
};

struct ntb_rx_info {
	unsigned int entry;
};

#define QP_TO_MW(ndev, qp)	((qp) % ntb_max_mw(ndev))
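/*
 * QP_TO_MW() spreads queue pairs round-robin across the available memory
 * windows: with two MWs, for example, qps 0/2/4 land in MW0 and qps 1/3/5
 * land in MW1.
 */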
#define NTB_QP_DEF_NUM_ENTRIES	100
#define NTB_LINK_DOWN_TIMEOUT	10

static int ntb_match_bus(struct device *dev, struct device_driver *drv)
{
	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}

static int ntb_client_probe(struct device *dev)
{
	const struct ntb_client *drv = container_of(dev->driver,
						    struct ntb_client, driver);
	struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
	int rc = -EINVAL;

	if (drv && drv->probe)
		rc = drv->probe(pdev);

	return rc;
}

static int ntb_client_remove(struct device *dev)
{
	const struct ntb_client *drv = container_of(dev->driver,
						    struct ntb_client, driver);
	struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);

	if (drv && drv->remove)
		drv->remove(pdev);

	return 0;
}

static struct bus_type ntb_bus_type = {
	.name = "ntb_bus",
	.match = ntb_match_bus,
	.probe = ntb_client_probe,
	.remove = ntb_client_remove,
};

static LIST_HEAD(ntb_transport_list);

static int ntb_bus_init(struct ntb_transport *nt)
{
	if (list_empty(&ntb_transport_list)) {
		int rc = bus_register(&ntb_bus_type);
		if (rc)
			return rc;
	}

	list_add(&nt->entry, &ntb_transport_list);

	return 0;
}

static void ntb_bus_remove(struct ntb_transport *nt)
{
	struct ntb_transport_client_dev *client_dev, *cd;

	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
			dev_name(&client_dev->dev));
		list_del(&client_dev->entry);
		device_unregister(&client_dev->dev);
	}

	list_del(&nt->entry);

	if (list_empty(&ntb_transport_list))
		bus_unregister(&ntb_bus_type);
}

static void ntb_client_release(struct device *dev)
{
	struct ntb_transport_client_dev *client_dev;
	client_dev = container_of(dev, struct ntb_transport_client_dev, dev);
	kfree(client_dev);
}

/**
 * ntb_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device with the NTB transport layer
 */
void ntb_unregister_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client, *cd;
	struct ntb_transport *nt;

	list_for_each_entry(nt, &ntb_transport_list, entry)
		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
			if (!strncmp(dev_name(&client->dev), device_name,
				     strlen(device_name))) {
				list_del(&client->entry);
				device_unregister(&client->dev);
			}
}
EXPORT_SYMBOL_GPL(ntb_unregister_client_dev);

/**
 * ntb_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 */
int ntb_register_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client_dev;
	struct ntb_transport *nt;
	int rc, i = 0;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	list_for_each_entry(nt, &ntb_transport_list, entry) {
		struct device *dev;

		client_dev = kzalloc(sizeof(struct ntb_transport_client_dev),
				     GFP_KERNEL);
		if (!client_dev) {
			rc = -ENOMEM;
			goto err;
		}

		dev = &client_dev->dev;

		/* setup and register client devices */
		dev_set_name(dev, "%s%d", device_name, i);
		dev->bus = &ntb_bus_type;
		dev->release = ntb_client_release;
		dev->parent = &ntb_query_pdev(nt->ndev)->dev;

		rc = device_register(dev);
		if (rc) {
			kfree(client_dev);
			goto err;
		}

		list_add_tail(&client_dev->entry, &nt->client_devs);
		i++;
	}

	return 0;

err:
	ntb_unregister_client_dev(device_name);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_register_client_dev);

/**
 * ntb_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_register_client(struct ntb_client *drv)
{
	drv->driver.bus = &ntb_bus_type;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_register_client);
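
/*
 * Illustrative usage sketch (hypothetical client code, not part of this
 * file): a client fills in a struct ntb_client and registers it once the
 * transport bus exists, e.g.:
 *
 *	static struct ntb_client my_client = {
 *		.driver	= { .name = "my_ntb_client" },
 *		.probe	= my_probe,
 *		.remove	= my_remove,
 *	};
 *
 *	rc = ntb_register_client(&my_client);
 */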

/**
 * ntb_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver with the NTB transport layer
 */
void ntb_unregister_client(struct ntb_client *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_unregister_client);

static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
			    loff_t *offp)
{
	struct ntb_transport_qp *qp;
	char *buf;
	ssize_t ret, out_offset, out_count;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	qp = filp->private_data;
	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "NTB QP stats\n");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_bytes - \t%llu\n", qp->rx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_pkts - \t%llu\n", qp->rx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_memcpy - \t%llu\n", qp->rx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_async - \t%llu\n", qp->rx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_buff - \t%p\n", qp->rx_buff);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_index - \t%u\n", qp->rx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_max_entry - \t%u\n", qp->rx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_bytes - \t%llu\n", qp->tx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_pkts - \t%llu\n", qp->tx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_memcpy - \t%llu\n", qp->tx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_async - \t%llu\n", qp->tx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_mw - \t%p\n", qp->tx_mw);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_index - \t%u\n", qp->tx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_max_entry - \t%u\n", qp->tx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\nQP Link %s\n", (qp->qp_link == NTB_LINK_UP) ?
			       "Up" : "Down");
	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations ntb_qp_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = debugfs_read,
};

static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
			 struct list_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail(entry, list);
	spin_unlock_irqrestore(lock, flags);
}

static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
					   struct list_head *list)
{
	struct ntb_queue_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (list_empty(list)) {
		entry = NULL;
		goto out;
	}
	entry = list_first_entry(list, struct ntb_queue_entry, entry);
	list_del(&entry->entry);
out:
	spin_unlock_irqrestore(lock, flags);

	return entry;
}

static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
				      unsigned int qp_num)
{
	struct ntb_transport_qp *qp = &nt->qps[qp_num];
	unsigned int rx_size, num_qps_mw;
	unsigned int mw_num, mw_max;
	unsigned int i;

	mw_max = ntb_max_mw(nt->ndev);
	mw_num = QP_TO_MW(nt->ndev, qp_num);

	WARN_ON(nt->mw[mw_num].virt_addr == NULL);
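
	/*
	 * QPs are statically divided among the memory windows; when max_qps
	 * does not divide evenly, the first (max_qps % mw_max) windows carry
	 * one extra QP.  E.g. 5 QPs over 2 MWs gives MW0 three QPs (0, 2, 4)
	 * and MW1 two (1, 3).
	 */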
	if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
		num_qps_mw = nt->max_qps / mw_max + 1;
	else
		num_qps_mw = nt->max_qps / mw_max;

	rx_size = (unsigned int) nt->mw[mw_num].size / num_qps_mw;
	qp->rx_buff = nt->mw[mw_num].virt_addr + qp_num / mw_max * rx_size;
	rx_size -= sizeof(struct ntb_rx_info);

	qp->remote_rx_info = qp->rx_buff + rx_size;

	/* Due to housekeeping, there must be at least two buffers */
	qp->rx_max_frame = min(transport_mtu, rx_size / 2);
	qp->rx_max_entry = rx_size / qp->rx_max_frame;
	qp->rx_index = 0;

	qp->remote_rx_info->entry = qp->rx_max_entry - 1;

	/* setup the hdr offsets with 0's */
	for (i = 0; i < qp->rx_max_entry; i++) {
		void *offset = qp->rx_buff + qp->rx_max_frame * (i + 1) -
			       sizeof(struct ntb_payload_header);
		memset(offset, 0, sizeof(struct ntb_payload_header));
	}

	qp->rx_pkts = 0;
	qp->tx_pkts = 0;
	qp->tx_index = 0;
}

static void ntb_free_mw(struct ntb_transport *nt, int num_mw)
{
	struct ntb_transport_mw *mw = &nt->mw[num_mw];
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr);
	mw->virt_addr = NULL;
}

static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
{
	struct ntb_transport_mw *mw = &nt->mw[num_mw];
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	/* No need to re-setup */
	if (mw->size == ALIGN(size, 4096))
		return 0;

	ntb_free_mw(nt, num_mw);

	/* Alloc memory for receiving data.  Must be 4k aligned */
	mw->size = ALIGN(size, 4096);

	mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
					   GFP_KERNEL);
	if (!mw->virt_addr) {
		dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
			(int) mw->size);
		return -ENOMEM;
	}

	/* Notify HW the memory location of the receive buffer */
	ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);

	return 0;
}

static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{
	struct ntb_transport *nt = qp->transport;
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	if (qp->qp_link == NTB_LINK_DOWN) {
		cancel_delayed_work_sync(&qp->link_work);
		return;
	}

	if (qp->event_handler)
		qp->event_handler(qp->cb_data, NTB_LINK_DOWN);

	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
	qp->qp_link = NTB_LINK_DOWN;
}

static void ntb_qp_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_cleanup);
	struct ntb_transport *nt = qp->transport;

	ntb_qp_link_cleanup(qp);

	if (nt->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
	schedule_work(&qp->link_cleanup);
}

static void ntb_transport_link_cleanup(struct ntb_transport *nt)
{
	int i;

	/* Pass along the info to any clients */
	for (i = 0; i < nt->max_qps; i++)
		if (!test_bit(i, &nt->qp_bitmap))
			ntb_qp_link_cleanup(&nt->qps[i]);

	if (nt->transport_link == NTB_LINK_DOWN)
		cancel_delayed_work_sync(&nt->link_work);

	nt->transport_link = NTB_LINK_DOWN;

	/* The scratchpad registers keep the values if the remote side
	 * goes down, blast them now to give them a sane value the next
	 * time they are accessed
	 */
	for (i = 0; i < MAX_SPAD; i++)
		ntb_write_local_spad(nt->ndev, i, 0);
}

static void ntb_transport_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport *nt = container_of(work, struct ntb_transport,
						link_cleanup);

	ntb_transport_link_cleanup(nt);
}

static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
{
	struct ntb_transport *nt = data;

	switch (event) {
	case NTB_EVENT_HW_LINK_UP:
		schedule_delayed_work(&nt->link_work, 0);
		break;
	case NTB_EVENT_HW_LINK_DOWN:
		schedule_work(&nt->link_cleanup);
		break;
	}
}

static void ntb_transport_link_work(struct work_struct *work)
{
	struct ntb_transport *nt = container_of(work, struct ntb_transport,
						link_work.work);
	struct ntb_device *ndev = nt->ndev;
	struct pci_dev *pdev = ntb_query_pdev(ndev);
	u32 val;
	int rc, i;

	/* send the local info, in the opposite order of the way we read it */
	for (i = 0; i < ntb_max_mw(ndev); i++) {
		rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2),
					   ntb_get_mw_size(ndev, i) >> 32);
		if (rc) {
			dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
				(u32)(ntb_get_mw_size(ndev, i) >> 32),
				MW0_SZ_HIGH + (i * 2));
			goto out;
		}

		rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2),
					   (u32) ntb_get_mw_size(ndev, i));
		if (rc) {
			dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
				(u32) ntb_get_mw_size(ndev, i),
				MW0_SZ_LOW + (i * 2));
			goto out;
		}
	}

	rc = ntb_write_remote_spad(ndev, NUM_MWS, ntb_max_mw(ndev));
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			ntb_max_mw(ndev), NUM_MWS);
		goto out;
	}

	rc = ntb_write_remote_spad(ndev, NUM_QPS, nt->max_qps);
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			nt->max_qps, NUM_QPS);
		goto out;
	}

	rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			NTB_TRANSPORT_VERSION, VERSION);
		goto out;
	}

	/* Query the remote side for its info */
	rc = ntb_read_remote_spad(ndev, VERSION, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", VERSION);
		goto out;
	}

	if (val != NTB_TRANSPORT_VERSION)
		goto out;
	dev_dbg(&pdev->dev, "Remote version = %d\n", val);

	rc = ntb_read_remote_spad(ndev, NUM_QPS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_QPS);
		goto out;
	}

	if (val != nt->max_qps)
		goto out;
	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);

	rc = ntb_read_remote_spad(ndev, NUM_MWS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS);
		goto out;
	}

	if (val != ntb_max_mw(ndev))
		goto out;
	dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);

	for (i = 0; i < ntb_max_mw(ndev); i++) {
		u64 val64;

		rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val);
		if (rc) {
			dev_err(&pdev->dev, "Error reading remote spad %d\n",
				MW0_SZ_HIGH + (i * 2));
			goto out1;
		}

		val64 = (u64) val << 32;

		rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val);
		if (rc) {
			dev_err(&pdev->dev, "Error reading remote spad %d\n",
				MW0_SZ_LOW + (i * 2));
			goto out1;
		}

		val64 |= val;

		dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64);

		rc = ntb_set_mw(nt, i, val64);
		if (rc)
			goto out1;
	}

	nt->transport_link = NTB_LINK_UP;

	for (i = 0; i < nt->max_qps; i++) {
		struct ntb_transport_qp *qp = &nt->qps[i];

		ntb_transport_setup_qp_mw(nt, i);

		if (qp->client_ready == NTB_LINK_UP)
			schedule_delayed_work(&qp->link_work, 0);
	}

	return;

out1:
	for (i = 0; i < ntb_max_mw(ndev); i++)
		ntb_free_mw(nt, i);
out:
	if (ntb_hw_link_status(ndev))
		schedule_delayed_work(&nt->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_work.work);
	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
	struct ntb_transport *nt = qp->transport;
	int rc, val;

	WARN_ON(nt->transport_link != NTB_LINK_UP);
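
	/*
	 * QP link handshake over scratchpad: each side sets the bit for its
	 * qp_num in the peer's QP_LINKS scratchpad register, then reads back
	 * its own copy to see whether the peer has set the matching bit.  The
	 * QP link comes up only once both sides have seen the other's bit.
	 */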
	rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
		return;
	}

	rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num);
	if (rc)
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			val | 1 << qp->qp_num, QP_LINKS);

	/* query remote spad for qp ready bits */
	rc = ntb_read_remote_spad(nt->ndev, QP_LINKS, &val);
	if (rc)
		dev_err(&pdev->dev, "Error reading remote spad %d\n", QP_LINKS);

	dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val);

	/* See if the remote side is up */
	if (1 << qp->qp_num & val) {
		qp->qp_link = NTB_LINK_UP;

		dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
		if (qp->event_handler)
			qp->event_handler(qp->cb_data, NTB_LINK_UP);
	} else if (nt->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static int ntb_transport_init_queue(struct ntb_transport *nt,
				    unsigned int qp_num)
{
	struct ntb_transport_qp *qp;
	unsigned int num_qps_mw, tx_size;
	unsigned int mw_num, mw_max;
	u64 qp_offset;

	mw_max = ntb_max_mw(nt->ndev);
	mw_num = QP_TO_MW(nt->ndev, qp_num);

	qp = &nt->qps[qp_num];
	qp->qp_num = qp_num;
	qp->transport = nt;
	qp->ndev = nt->ndev;
	qp->qp_link = NTB_LINK_DOWN;
	qp->client_ready = NTB_LINK_DOWN;
	qp->event_handler = NULL;

	if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
		num_qps_mw = nt->max_qps / mw_max + 1;
	else
		num_qps_mw = nt->max_qps / mw_max;

	tx_size = (unsigned int) ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw;
	qp_offset = qp_num / mw_max * tx_size;
	qp->tx_mw = ntb_get_mw_vbase(nt->ndev, mw_num) + qp_offset;
	if (!qp->tx_mw)
		return -EINVAL;

	qp->tx_mw_phys = ntb_get_mw_base(qp->ndev, mw_num) + qp_offset;
	if (!qp->tx_mw_phys)
		return -EINVAL;

	tx_size -= sizeof(struct ntb_rx_info);
	qp->rx_info = qp->tx_mw + tx_size;

	/* Due to housekeeping, there must be at least two buffers */
	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
	qp->tx_max_entry = tx_size / qp->tx_max_frame;

	if (ntb_query_debugfs(nt->ndev)) {
		char debugfs_name[8];

		snprintf(debugfs_name, sizeof(debugfs_name), "qp%d", qp_num);
		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
						     ntb_query_debugfs(nt->ndev));

		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
							qp->debugfs_dir, qp,
							&ntb_qp_debugfs_stats);
	}

	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);

	spin_lock_init(&qp->ntb_rx_pend_q_lock);
	spin_lock_init(&qp->ntb_rx_free_q_lock);
	spin_lock_init(&qp->ntb_tx_free_q_lock);

	INIT_LIST_HEAD(&qp->rx_pend_q);
	INIT_LIST_HEAD(&qp->rx_free_q);
	INIT_LIST_HEAD(&qp->tx_free_q);

	return 0;
}

int ntb_transport_init(struct pci_dev *pdev)
{
	struct ntb_transport *nt;
	int rc, i;

	nt = kzalloc(sizeof(struct ntb_transport), GFP_KERNEL);
	if (!nt)
		return -ENOMEM;

	nt->ndev = ntb_register_transport(pdev, nt);
	if (!nt->ndev) {
		rc = -EIO;
		goto err;
	}

	nt->mw = kcalloc(ntb_max_mw(nt->ndev), sizeof(struct ntb_transport_mw),
			 GFP_KERNEL);
	if (!nt->mw) {
		rc = -ENOMEM;
		goto err1;
	}

	if (max_num_clients)
		nt->max_qps = min(ntb_max_cbs(nt->ndev), max_num_clients);
	else
		nt->max_qps = min(ntb_max_cbs(nt->ndev), ntb_max_mw(nt->ndev));

	nt->qps = kcalloc(nt->max_qps, sizeof(struct ntb_transport_qp),
			  GFP_KERNEL);
	if (!nt->qps) {
		rc = -ENOMEM;
		goto err2;
	}

	nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1;
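	/* each set bit marks a free qp: ntb_transport_create_queue() claims a
	 * queue by clearing its bit, and ntb_transport_free_queue() sets it
	 * again on release
	 */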

	for (i = 0; i < nt->max_qps; i++) {
		rc = ntb_transport_init_queue(nt, i);
		if (rc)
			goto err3;
	}

	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);

	rc = ntb_register_event_callback(nt->ndev,
					 ntb_transport_event_callback);
	if (rc)
		goto err3;

	INIT_LIST_HEAD(&nt->client_devs);
	rc = ntb_bus_init(nt);
	if (rc)
		goto err4;

	if (ntb_hw_link_status(nt->ndev))
		schedule_delayed_work(&nt->link_work, 0);

	return 0;

err4:
	ntb_unregister_event_callback(nt->ndev);
err3:
	kfree(nt->qps);
err2:
	kfree(nt->mw);
err1:
	ntb_unregister_transport(nt->ndev);
err:
	kfree(nt);
	return rc;
}

void ntb_transport_free(void *transport)
{
	struct ntb_transport *nt = transport;
	struct ntb_device *ndev = nt->ndev;
	int i;

	ntb_transport_link_cleanup(nt);

	/* verify that all the qp's are freed */
	for (i = 0; i < nt->max_qps; i++) {
		if (!test_bit(i, &nt->qp_bitmap))
			ntb_transport_free_queue(&nt->qps[i]);
		debugfs_remove_recursive(nt->qps[i].debugfs_dir);
	}

	ntb_bus_remove(nt);

	cancel_delayed_work_sync(&nt->link_work);

	ntb_unregister_event_callback(ndev);

	for (i = 0; i < ntb_max_mw(ndev); i++)
		ntb_free_mw(nt, i);

	kfree(nt->qps);
	kfree(nt->mw);
	ntb_unregister_transport(ndev);
	kfree(nt);
}

static void ntb_rx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	void *cb_data = entry->cb_data;
	unsigned int len = entry->len;
	struct ntb_payload_header *hdr = entry->rx_hdr;

	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();
	hdr->flags = 0;

	iowrite32(entry->index, &qp->rx_info->entry);

	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);

	if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
		qp->rx_handler(qp, qp->cb_data, cb_data, len);
}

static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
{
	void *buf = entry->buf;
	size_t len = entry->len;

	memcpy(buf, offset, len);

	ntb_rx_copy_callback(entry);
}

static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
			 size_t len)
{
	struct dma_async_tx_descriptor *txd;
	struct ntb_transport_qp *qp = entry->qp;
	struct dma_chan *chan = qp->dma_chan;
	struct dma_device *device;
	size_t pay_off, buff_off;
	dma_addr_t src, dest;
	dma_cookie_t cookie;
	void *buf = entry->buf;
	unsigned long flags;
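
	/*
	 * Decision chain: fall back to a CPU memcpy when no DMA channel is
	 * available, the payload is shorter than copy_bytes, the buffers do
	 * not satisfy the engine's alignment constraints, or any of the
	 * mapping/submission steps below fails.
	 */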
	entry->len = len;

	if (!chan)
		goto err;

	if (len < copy_bytes)
		goto err1;

	device = chan->device;
	pay_off = (size_t) offset & ~PAGE_MASK;
	buff_off = (size_t) buf & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
		goto err1;

	dest = dma_map_single(device->dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(device->dev, dest))
		goto err1;

	src = dma_map_single(device->dev, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, src))
		goto err2;

	flags = DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SRC_UNMAP_SINGLE |
		DMA_PREP_INTERRUPT;
	txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags);
	if (!txd)
		goto err3;

	txd->callback = ntb_rx_copy_callback;
	txd->callback_param = entry;

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err3;

	qp->last_cookie = cookie;

	qp->rx_async++;

	return;

err3:
	dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE);
err2:
	dma_unmap_single(device->dev, dest, len, DMA_FROM_DEVICE);
err1:
	/* If the callbacks come out of order, the writing of the index to the
	 * last completed will be out of order.  This may result in the
	 * receive stalling forever.
	 */
	dma_sync_wait(chan, qp->last_cookie);
err:
	ntb_memcpy_rx(entry, offset);
	qp->rx_memcpy++;
}

static int ntb_process_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_payload_header *hdr;
	struct ntb_queue_entry *entry;
	void *offset;

	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);

	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
	if (!entry) {
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"no buffer - HDR ver %u, len %d, flags %x\n",
			hdr->ver, hdr->len, hdr->flags);
		qp->rx_err_no_buf++;
		return -ENOMEM;
	}

	if (!(hdr->flags & DESC_DONE_FLAG)) {
		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);
		qp->rx_ring_empty++;
		return -EAGAIN;
	}

	if (hdr->ver != (u32) qp->rx_pkts) {
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"qp %d: version mismatch, expected %llu - got %u\n",
			qp->qp_num, qp->rx_pkts, hdr->ver);
		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);
		qp->rx_err_ver++;
		return -EIO;
	}

	if (hdr->flags & LINK_DOWN_FLAG) {
		ntb_qp_link_down(qp);
		goto err;
	}

	dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
		"rx offset %u, ver %u - %d payload received, buf size %d\n",
		qp->rx_index, hdr->ver, hdr->len, entry->len);

	qp->rx_bytes += hdr->len;
	qp->rx_pkts++;

	if (hdr->len > entry->len) {
		qp->rx_err_oflow++;
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"RX overflow! Wanted %d got %d\n",
			hdr->len, entry->len);
		goto err;
	}

	entry->index = qp->rx_index;
	entry->rx_hdr = hdr;

	ntb_async_rx(entry, offset, hdr->len);

out:
	qp->rx_index++;
	qp->rx_index %= qp->rx_max_entry;

	return 0;

err:
	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
		     &qp->rx_pend_q);
	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();
	hdr->flags = 0;
	iowrite32(qp->rx_index, &qp->rx_info->entry);

	goto out;
}

static void ntb_transport_rx(unsigned long data)
{
	struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
	int rc, i;

	/* Limit the number of packets processed in a single interrupt to
	 * provide fairness to others
	 */
	for (i = 0; i < qp->rx_max_entry; i++) {
		rc = ntb_process_rxc(qp);
		if (rc)
			break;
	}

	if (qp->dma_chan)
		dma_async_issue_pending(qp->dma_chan);
}

static void ntb_transport_rxc_db(void *data, int db_num)
{
	struct ntb_transport_qp *qp = data;

	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
		__func__, db_num);

	tasklet_schedule(&qp->rx_work);
}

static void ntb_tx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	struct ntb_payload_header __iomem *hdr = entry->tx_hdr;

	/* Ensure that the data is fully copied out before setting the flags */
	wmb();
	iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);

	ntb_ring_doorbell(qp->ndev, qp->qp_num);

	/* The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar.  Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	 */
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, entry->cb_data,
				       entry->len);
	}

	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
}

static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
{
	memcpy_toio(offset, entry->buf, entry->len);

	ntb_tx_copy_callback(entry);
}

static void ntb_async_tx(struct ntb_transport_qp *qp,
			 struct ntb_queue_entry *entry)
{
	struct ntb_payload_header __iomem *hdr;
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan = qp->dma_chan;
	struct dma_device *device;
	size_t dest_off, buff_off;
	dma_addr_t src, dest;
	dma_cookie_t cookie;
	void __iomem *offset;
	size_t len = entry->len;
	void *buf = entry->buf;
	unsigned long flags;

	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
	entry->tx_hdr = hdr;

	iowrite32(entry->len, &hdr->len);
	iowrite32((u32) qp->tx_pkts, &hdr->ver);

	if (!chan)
		goto err;

	if (len < copy_bytes)
		goto err;

	device = chan->device;
	dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
	buff_off = (size_t) buf & ~PAGE_MASK;
	dest_off = (size_t) dest & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
		goto err;

	src = dma_map_single(device->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, src))
		goto err;

	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_PREP_INTERRUPT;
	txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags);
	if (!txd)
		goto err1;

	txd->callback = ntb_tx_copy_callback;
	txd->callback_param = entry;

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err1;

	dma_async_issue_pending(chan);
	qp->tx_async++;

	return;

err1:
	dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE);
err:
	ntb_memcpy_tx(entry, offset);
	qp->tx_memcpy++;
}

static int ntb_process_tx(struct ntb_transport_qp *qp,
			  struct ntb_queue_entry *entry)
{
	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - tx %u, entry len %d flags %x buff %p\n",
		qp->tx_pkts, qp->tx_index, entry->len, entry->flags,
		entry->buf);
	if (qp->tx_index == qp->remote_rx_info->entry) {
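		/* the remote side has not yet consumed the slot at tx_index
		 * (compare ntb_rx_copy_callback(), which advances the peer's
		 * rx_info->entry), so the TX ring is full
		 */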
		qp->tx_ring_full++;
		return -EAGAIN;
	}

	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, NULL, -EIO);

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
		return 0;
	}

	ntb_async_tx(qp, entry);

	qp->tx_index++;
	qp->tx_index %= qp->tx_max_entry;

	qp->tx_pkts++;

	return 0;
}

static void ntb_send_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
	struct ntb_queue_entry *entry;
	int i, rc;

	if (qp->qp_link == NTB_LINK_DOWN)
		return;

	qp->qp_link = NTB_LINK_DOWN;
	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);

	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
		if (entry)
			break;
		msleep(100);
	}

	if (!entry)
		return;

	entry->cb_data = NULL;
	entry->buf = NULL;
	entry->len = 0;
	entry->flags = LINK_DOWN_FLAG;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
			qp->qp_num);
}

/**
 * ntb_transport_create_queue - Create a new NTB transport layer queue
 * @data: pointer for callback data
 * @pdev: PCI device to create the queue on
 * @handlers: pointer to the rx, tx, and event callback functions
 *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive.  The receive callback routine will be
 * used to pass up data when the transport has received it on the queue.  The
 * transmit callback routine will be called when the transport has completed the
 * transmission of the data on the queue and the data is ready to be freed.
 *
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
 */
struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct pci_dev *pdev,
			   const struct ntb_queue_handlers *handlers)
{
	struct ntb_queue_entry *entry;
	struct ntb_transport_qp *qp;
	struct ntb_transport *nt;
	unsigned int free_queue;
	int rc, i;

	nt = ntb_find_transport(pdev);
	if (!nt)
		goto err;

	free_queue = ffs(nt->qp_bitmap);
	if (!free_queue)
		goto err;

	/* decrement free_queue to make it zero based */
	free_queue--;

	clear_bit(free_queue, &nt->qp_bitmap);

	qp = &nt->qps[free_queue];
	qp->cb_data = data;
	qp->rx_handler = handlers->rx_handler;
	qp->tx_handler = handlers->tx_handler;
	qp->event_handler = handlers->event_handler;

	dmaengine_get();
	qp->dma_chan = dma_find_channel(DMA_MEMCPY);
	if (!qp->dma_chan) {
		dmaengine_put();
		dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n");
	}

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
		if (!entry)
			goto err1;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
			     &qp->rx_free_q);
	}

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
		if (!entry)
			goto err1;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
	}

	tasklet_init(&qp->rx_work, ntb_transport_rx, (unsigned long) qp);

	rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
				      ntb_transport_rxc_db);
	if (rc)
		goto err2;

	dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);

	return qp;

err2:
	tasklet_disable(&qp->rx_work);
err1:
	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);
	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
		kfree(entry);
	if (qp->dma_chan)
		dmaengine_put();
	set_bit(free_queue, &nt->qp_bitmap);
err:
	return NULL;
}
EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
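
/*
 * Illustrative call sequence (hypothetical client code, not part of this
 * file):
 *
 *	static const struct ntb_queue_handlers my_handlers = {
 *		.rx_handler	= my_rx,
 *		.tx_handler	= my_tx,
 *		.event_handler	= my_event,
 *	};
 *
 *	qp = ntb_transport_create_queue(my_ctx, pdev, &my_handlers);
 *	if (!qp)
 *		return -EIO;
 */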

/**
 * ntb_transport_free_queue - Frees NTB transport queue
 * @qp: NTB queue to be freed
 *
 * Frees NTB transport queue
 */
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	struct ntb_queue_entry *entry;

	if (!qp)
		return;

	pdev = ntb_query_pdev(qp->ndev);

	if (qp->dma_chan) {
		struct dma_chan *chan = qp->dma_chan;
		/* Putting the dma_chan to NULL will force any new traffic to be
		 * processed by the CPU instead of the DMA engine
		 */
		qp->dma_chan = NULL;

		/* Try to be nice and wait for any queued DMA engine
		 * transactions to process before smashing it with a rock
		 */
		dma_sync_wait(chan, qp->last_cookie);
		dmaengine_terminate_all(chan);
		dmaengine_put();
	}

	ntb_unregister_db_callback(qp->ndev, qp->qp_num);
	tasklet_disable(&qp->rx_work);

	cancel_delayed_work_sync(&qp->link_work);

	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
		kfree(entry);

	while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
		dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
		kfree(entry);
	}

	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);

	set_bit(qp->qp_num, &qp->transport->qp_bitmap);

	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
}
EXPORT_SYMBOL_GPL(ntb_transport_free_queue);

/**
 * ntb_transport_rx_remove - Dequeues enqueued rx packet
 * @qp: NTB transport layer queue the entry is to be removed from
 * @len: pointer to variable to write enqueued buffers length
 *
 * Dequeues unused buffers from receive queue.  Should only be used during
 * shutdown of qp.
 *
 * RETURNS: NULL on error, or a pointer to the data of the dequeued entry
 */
void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
{
	struct ntb_queue_entry *entry;
	void *buf;

	if (!qp || qp->client_ready == NTB_LINK_UP)
		return NULL;

	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
	if (!entry)
		return NULL;

	buf = entry->cb_data;
	*len = entry->len;

	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);

	return buf;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);

/**
 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that incoming packets will be copied into
 * @len: length of the data buffer
 *
 * Enqueue a new receive buffer onto the transport queue into which an NTB
 * payload can be received.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;

	if (!qp)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
	if (!entry)
		return -ENOMEM;

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;

	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);

	return 0;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);

/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which an NTB
 * payload will be transmitted.  This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;
	int rc;

	if (!qp || qp->qp_link != NTB_LINK_UP || !len)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
	if (!entry) {
		qp->tx_err_no_buf++;
		return -ENOMEM;
	}

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
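
/*
 * Illustrative data path (hypothetical client code): post receive buffers
 * up front, then queue transmits as data becomes available; buffers are
 * handed back through the rx/tx handlers on completion.
 *
 *	for (i = 0; i < NUM_BUFS; i++)
 *		ntb_transport_rx_enqueue(qp, buf[i], buf[i], bufsize);
 *	...
 *	rc = ntb_transport_tx_enqueue(qp, buf, buf, len);
 */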

/**
 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
 */
void ntb_transport_link_up(struct ntb_transport_qp *qp)
{
	if (!qp)
		return;

	qp->client_ready = NTB_LINK_UP;

	if (qp->transport->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work, 0);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_up);

/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * transport queue specified.  It is the client's responsibility to ensure all
 * entries on queue are purged or otherwise handled appropriately.
 */
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	int rc, val;

	if (!qp)
		return;

	pdev = ntb_query_pdev(qp->ndev);
	qp->client_ready = NTB_LINK_DOWN;

	rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
		return;
	}

	rc = ntb_write_remote_spad(qp->ndev, QP_LINKS,
				   val & ~(1 << qp->qp_num));
	if (rc)
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			val & ~(1 << qp->qp_num), QP_LINKS);

	if (qp->qp_link == NTB_LINK_UP)
		ntb_send_link_down(qp);

	cancel_delayed_work_sync(&qp->link_work);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_down);

/**
 * ntb_transport_link_query - Query transport link state
 * @qp: NTB transport layer queue to be queried
 *
 * Query connectivity to the remote system of the NTB transport queue
 *
 * RETURNS: true for link up or false for link down
 */
bool ntb_transport_link_query(struct ntb_transport_qp *qp)
{
	if (!qp)
		return false;

	return qp->qp_link == NTB_LINK_UP;
}
EXPORT_SYMBOL_GPL(ntb_transport_link_query);

/**
 * ntb_transport_qp_num - Query the qp number
 * @qp: NTB transport layer queue to be queried
 *
 * Query qp number of the NTB transport queue
 *
 * RETURNS: a zero-based number specifying the qp number
 */
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
{
	if (!qp)
		return 0;

	return qp->qp_num;
}
EXPORT_SYMBOL_GPL(ntb_transport_qp_num);

/**
 * ntb_transport_max_size - Query the max payload size of a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the maximum payload size permissible on the given qp
 *
 * RETURNS: the max payload size of a qp
 */
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
{
	unsigned int max;

	if (!qp)
		return 0;

	if (!qp->dma_chan)
		return qp->tx_max_frame - sizeof(struct ntb_payload_header);

	/* If DMA engine usage is possible, try to find the max size for that */
	max = qp->tx_max_frame - sizeof(struct ntb_payload_header);
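	/* round down so the whole payload satisfies the DMA engine's
	 * alignment requirement (copy_align is a log2 shift)
	 */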
	max -= max % (1 << qp->dma_chan->device->copy_align);

	return max;
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);