NTB: allocate number of transport entries depending on size of ring
[cascardo/linux.git] / drivers/ntb/ntb_transport.c
/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Transport Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "linux/ntb.h"
#include "linux/ntb_transport.h"

#define NTB_TRANSPORT_VERSION   4
#define NTB_TRANSPORT_VER       "4"
#define NTB_TRANSPORT_NAME      "ntb_transport"
#define NTB_TRANSPORT_DESC      "Software Queue-Pair Transport over NTB"

MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
MODULE_VERSION(NTB_TRANSPORT_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

static unsigned long max_mw_size;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");

static unsigned int transport_mtu = 0x10000;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");

static bool use_dma;
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy");

static struct dentry *nt_debugfs_dir;

struct ntb_queue_entry {
        /* ntb_queue list reference */
        struct list_head entry;
        /* pointers to data to be transferred */
        void *cb_data;
        void *buf;
        unsigned int len;
        unsigned int flags;

        struct ntb_transport_qp *qp;
        union {
                struct ntb_payload_header __iomem *tx_hdr;
                struct ntb_payload_header *rx_hdr;
        };
        unsigned int index;
};

struct ntb_rx_info {
        unsigned int entry;
};

struct ntb_transport_qp {
        struct ntb_transport_ctx *transport;
        struct ntb_dev *ndev;
        void *cb_data;
        struct dma_chan *tx_dma_chan;
        struct dma_chan *rx_dma_chan;

        bool client_ready;
        bool link_is_up;
        bool active;

        u8 qp_num;      /* Only 64 QPs are allowed: 0-63 */
        u64 qp_bit;

        struct ntb_rx_info __iomem *rx_info;
        struct ntb_rx_info *remote_rx_info;

        void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
                           void *data, int len);
        struct list_head tx_free_q;
        spinlock_t ntb_tx_free_q_lock;
        void __iomem *tx_mw;
        dma_addr_t tx_mw_phys;
        unsigned int tx_index;
        unsigned int tx_max_entry;
        unsigned int tx_max_frame;

        void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
                           void *data, int len);
        struct list_head rx_post_q;
        struct list_head rx_pend_q;
        struct list_head rx_free_q;
        /* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
        spinlock_t ntb_rx_q_lock;
        void *rx_buff;
        unsigned int rx_index;
        unsigned int rx_max_entry;
        unsigned int rx_max_frame;
        unsigned int rx_alloc_entry;
        dma_cookie_t last_cookie;
        struct tasklet_struct rxc_db_work;

        void (*event_handler)(void *data, int status);
        struct delayed_work link_work;
        struct work_struct link_cleanup;

        struct dentry *debugfs_dir;
        struct dentry *debugfs_stats;

        /* Stats */
        u64 rx_bytes;
        u64 rx_pkts;
        u64 rx_ring_empty;
        u64 rx_err_no_buf;
        u64 rx_err_oflow;
        u64 rx_err_ver;
        u64 rx_memcpy;
        u64 rx_async;
        u64 dma_rx_prep_err;
        u64 tx_bytes;
        u64 tx_pkts;
        u64 tx_ring_full;
        u64 tx_err_no_buf;
        u64 tx_memcpy;
        u64 tx_async;
        u64 dma_tx_prep_err;
};

struct ntb_transport_mw {
        phys_addr_t phys_addr;
        resource_size_t phys_size;
        resource_size_t xlat_align;
        resource_size_t xlat_align_size;
        void __iomem *vbase;
        size_t xlat_size;
        size_t buff_size;
        void *virt_addr;
        dma_addr_t dma_addr;
};

struct ntb_transport_client_dev {
        struct list_head entry;
        struct ntb_transport_ctx *nt;
        struct device dev;
};

struct ntb_transport_ctx {
        struct list_head entry;
        struct list_head client_devs;

        struct ntb_dev *ndev;

        struct ntb_transport_mw *mw_vec;
        struct ntb_transport_qp *qp_vec;
        unsigned int mw_count;
        unsigned int qp_count;
        u64 qp_bitmap;
        u64 qp_bitmap_free;

        bool link_is_up;
        struct delayed_work link_work;
        struct work_struct link_cleanup;

        struct dentry *debugfs_node_dir;
};

enum {
        DESC_DONE_FLAG = BIT(0),
        LINK_DOWN_FLAG = BIT(1),
};

struct ntb_payload_header {
        unsigned int ver;
        unsigned int len;
        unsigned int flags;
};

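/*
 * Scratchpad register layout used for the link-up handshake.  Memory
 * window sizes are exchanged as HIGH/LOW 32-bit halves, two registers
 * per memory window.
 */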
enum {
        VERSION = 0,
        QP_LINKS,
        NUM_QPS,
        NUM_MWS,
        MW0_SZ_HIGH,
        MW0_SZ_LOW,
        MW1_SZ_HIGH,
        MW1_SZ_LOW,
        MAX_SPAD,
};

#define dev_client_dev(__dev) \
        container_of((__dev), struct ntb_transport_client_dev, dev)

#define drv_client(__drv) \
        container_of((__drv), struct ntb_transport_client, driver)

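/*
 * Queue pairs are striped across the memory windows round-robin:
 * QP n uses MW (n % mw_count).  The buffer carving in
 * ntb_transport_setup_qp_mw() and ntb_transport_init_queue() relies on
 * this mapping.
 */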
#define QP_TO_MW(nt, qp)        ((qp) % nt->mw_count)
#define NTB_QP_DEF_NUM_ENTRIES  100
#define NTB_LINK_DOWN_TIMEOUT   10
#define DMA_RETRIES             20
#define DMA_OUT_RESOURCE_TO     50

static void ntb_transport_rxc_db(unsigned long data);
static const struct ntb_ctx_ops ntb_transport_ops;
static struct ntb_client ntb_transport_client;

static int ntb_transport_bus_match(struct device *dev,
                                   struct device_driver *drv)
{
        return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}

static int ntb_transport_bus_probe(struct device *dev)
{
        const struct ntb_transport_client *client;
        int rc = -EINVAL;

        get_device(dev);

        client = drv_client(dev->driver);
        rc = client->probe(dev);
        if (rc)
                put_device(dev);

        return rc;
}

static int ntb_transport_bus_remove(struct device *dev)
{
        const struct ntb_transport_client *client;

        client = drv_client(dev->driver);
        client->remove(dev);

        put_device(dev);

        return 0;
}

static struct bus_type ntb_transport_bus = {
        .name = "ntb_transport",
        .match = ntb_transport_bus_match,
        .probe = ntb_transport_bus_probe,
        .remove = ntb_transport_bus_remove,
};

static LIST_HEAD(ntb_transport_list);

static int ntb_bus_init(struct ntb_transport_ctx *nt)
{
        list_add_tail(&nt->entry, &ntb_transport_list);
        return 0;
}

static void ntb_bus_remove(struct ntb_transport_ctx *nt)
{
        struct ntb_transport_client_dev *client_dev, *cd;

        list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
                dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
                        dev_name(&client_dev->dev));
                list_del(&client_dev->entry);
                device_unregister(&client_dev->dev);
        }

        list_del(&nt->entry);
}

static void ntb_transport_client_release(struct device *dev)
{
        struct ntb_transport_client_dev *client_dev;

        client_dev = dev_client_dev(dev);
        kfree(client_dev);
}

/**
 * ntb_transport_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device with the NTB transport layer
 */
void ntb_transport_unregister_client_dev(char *device_name)
{
        struct ntb_transport_client_dev *client, *cd;
        struct ntb_transport_ctx *nt;

        list_for_each_entry(nt, &ntb_transport_list, entry)
                list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
                        if (!strncmp(dev_name(&client->dev), device_name,
                                     strlen(device_name))) {
                                list_del(&client->entry);
                                device_unregister(&client->dev);
                        }
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev);

/**
 * ntb_transport_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 */
int ntb_transport_register_client_dev(char *device_name)
{
        struct ntb_transport_client_dev *client_dev;
        struct ntb_transport_ctx *nt;
        int node;
        int rc, i = 0;

        if (list_empty(&ntb_transport_list))
                return -ENODEV;

        list_for_each_entry(nt, &ntb_transport_list, entry) {
                struct device *dev;

                node = dev_to_node(&nt->ndev->dev);

                client_dev = kzalloc_node(sizeof(*client_dev),
                                          GFP_KERNEL, node);
                if (!client_dev) {
                        rc = -ENOMEM;
                        goto err;
                }

                dev = &client_dev->dev;

                /* setup and register client devices */
                dev_set_name(dev, "%s%d", device_name, i);
                dev->bus = &ntb_transport_bus;
                dev->release = ntb_transport_client_release;
                dev->parent = &nt->ndev->dev;

                rc = device_register(dev);
                if (rc) {
                        kfree(client_dev);
                        goto err;
                }

                list_add_tail(&client_dev->entry, &nt->client_devs);
                i++;
        }

        return 0;

err:
        ntb_transport_unregister_client_dev(device_name);

        return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client_dev);

/**
 * ntb_transport_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_register_client(struct ntb_transport_client *drv)
{
        drv->driver.bus = &ntb_transport_bus;

        if (list_empty(&ntb_transport_list))
                return -ENODEV;

        return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client);

/**
 * ntb_transport_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver from the NTB transport layer
 */
void ntb_transport_unregister_client(struct ntb_transport_client *drv)
{
        driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client);

static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
                            loff_t *offp)
{
        struct ntb_transport_qp *qp;
        char *buf;
        ssize_t ret, out_offset, out_count;

        qp = filp->private_data;

        if (!qp || !qp->link_is_up)
                return 0;

        out_count = 1000;

        buf = kmalloc(out_count, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        out_offset = 0;
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "\nNTB QP stats:\n\n");
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_bytes - \t%llu\n", qp->rx_bytes);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_pkts - \t%llu\n", qp->rx_pkts);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_memcpy - \t%llu\n", qp->rx_memcpy);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_async - \t%llu\n", qp->rx_async);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_ring_empty - %llu\n", qp->rx_ring_empty);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_err_ver - \t%llu\n", qp->rx_err_ver);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_buff - \t0x%p\n", qp->rx_buff);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_index - \t%u\n", qp->rx_index);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_max_entry - \t%u\n", qp->rx_max_entry);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_alloc_entry - \t%u\n\n", qp->rx_alloc_entry);

        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_bytes - \t%llu\n", qp->tx_bytes);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_pkts - \t%llu\n", qp->tx_pkts);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_memcpy - \t%llu\n", qp->tx_memcpy);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_async - \t%llu\n", qp->tx_async);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_ring_full - \t%llu\n", qp->tx_ring_full);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_mw - \t0x%p\n", qp->tx_mw);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_index (H) - \t%u\n", qp->tx_index);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "RRI (T) - \t%u\n",
                               qp->remote_rx_info->entry);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_max_entry - \t%u\n", qp->tx_max_entry);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "free tx - \t%u\n",
                               ntb_transport_tx_free_entry(qp));
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "DMA tx prep err - \t%llu\n",
                               qp->dma_tx_prep_err);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "DMA rx prep err - \t%llu\n",
                               qp->dma_rx_prep_err);

        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "\n");
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "Using TX DMA - \t%s\n",
                               qp->tx_dma_chan ? "Yes" : "No");
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "Using RX DMA - \t%s\n",
                               qp->rx_dma_chan ? "Yes" : "No");
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "QP Link - \t%s\n",
                               qp->link_is_up ? "Up" : "Down");
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "\n");

        if (out_offset > out_count)
                out_offset = out_count;

        ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
        kfree(buf);
        return ret;
}

static const struct file_operations ntb_qp_debugfs_stats = {
        .owner = THIS_MODULE,
        .open = simple_open,
        .read = debugfs_read,
};

static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
                         struct list_head *list)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        list_add_tail(entry, list);
        spin_unlock_irqrestore(lock, flags);
}

static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
                                           struct list_head *list)
{
        struct ntb_queue_entry *entry;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        if (list_empty(list)) {
                entry = NULL;
                goto out;
        }
        entry = list_first_entry(list, struct ntb_queue_entry, entry);
        list_del(&entry->entry);

out:
        spin_unlock_irqrestore(lock, flags);

        return entry;
}

static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
                                           struct list_head *list,
                                           struct list_head *to_list)
{
        struct ntb_queue_entry *entry;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);

        if (list_empty(list)) {
                entry = NULL;
        } else {
                entry = list_first_entry(list, struct ntb_queue_entry, entry);
                list_move_tail(&entry->entry, to_list);
        }

        spin_unlock_irqrestore(lock, flags);

        return entry;
}

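/*
 * Carve this QP's receive ring out of its memory window: the MW is
 * split among the QPs that share it, the trailing struct ntb_rx_info
 * is reserved for flow control, and the remainder is divided into
 * rx_max_entry frames of at most transport_mtu bytes.
 */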
static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
                                     unsigned int qp_num)
{
        struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
        struct ntb_transport_mw *mw;
        struct ntb_dev *ndev = nt->ndev;
        struct ntb_queue_entry *entry;
        unsigned int rx_size, num_qps_mw;
        unsigned int mw_num, mw_count, qp_count;
        unsigned int i;
        int node;

        mw_count = nt->mw_count;
        qp_count = nt->qp_count;

        mw_num = QP_TO_MW(nt, qp_num);
        mw = &nt->mw_vec[mw_num];

        if (!mw->virt_addr)
                return -ENOMEM;

        /* spread any remainder of qp_count / mw_count over the lower MWs */
        if (mw_num < qp_count % mw_count)
                num_qps_mw = qp_count / mw_count + 1;
        else
                num_qps_mw = qp_count / mw_count;

        rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
        qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count);
        rx_size -= sizeof(struct ntb_rx_info);

        qp->remote_rx_info = qp->rx_buff + rx_size;

        /* Due to housekeeping, there must be at least 2 buffers */
        qp->rx_max_frame = min(transport_mtu, rx_size / 2);
        qp->rx_max_entry = rx_size / qp->rx_max_frame;
        qp->rx_index = 0;

        /*
         * The ring may hold more entries than were allocated by default;
         * allocate the difference so the free-entry list stays in sync
         * with the number of transport frames.
         */
        node = dev_to_node(&ndev->dev);
        for (i = qp->rx_alloc_entry; i < qp->rx_max_entry; i++) {
                entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
                if (!entry)
                        return -ENOMEM;

                entry->qp = qp;
                ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
                             &qp->rx_free_q);
                qp->rx_alloc_entry++;
        }

        qp->remote_rx_info->entry = qp->rx_max_entry - 1;

        /* setup the hdr offsets with 0's */
        for (i = 0; i < qp->rx_max_entry; i++) {
                void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) -
                                sizeof(struct ntb_payload_header));
                memset(offset, 0, sizeof(struct ntb_payload_header));
        }

        qp->rx_pkts = 0;
        qp->tx_pkts = 0;
        qp->tx_index = 0;

        return 0;
}

static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
{
        struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
        struct pci_dev *pdev = nt->ndev->pdev;

        if (!mw->virt_addr)
                return;

        ntb_mw_clear_trans(nt->ndev, num_mw);
        dma_free_coherent(&pdev->dev, mw->buff_size,
                          mw->virt_addr, mw->dma_addr);
        mw->xlat_size = 0;
        mw->buff_size = 0;
        mw->virt_addr = NULL;
}

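/*
 * Allocate the local receive buffer for a memory window and program the
 * peer's translation to point at it.  The buffer must satisfy the
 * alignment reported by ntb_mw_get_range(), hence the round_up() calls
 * and the IS_ALIGNED() check below.
 */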
static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
                      resource_size_t size)
{
        struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
        struct pci_dev *pdev = nt->ndev->pdev;
        size_t xlat_size, buff_size;
        int rc;

        if (!size)
                return -EINVAL;

        xlat_size = round_up(size, mw->xlat_align_size);
        buff_size = round_up(size, mw->xlat_align);

        /* No need to re-setup */
        if (mw->xlat_size == xlat_size)
                return 0;

        if (mw->buff_size)
                ntb_free_mw(nt, num_mw);

        /* Alloc memory for receiving data.  Must be aligned */
        mw->xlat_size = xlat_size;
        mw->buff_size = buff_size;

        mw->virt_addr = dma_alloc_coherent(&pdev->dev, buff_size,
                                           &mw->dma_addr, GFP_KERNEL);
        if (!mw->virt_addr) {
                mw->xlat_size = 0;
                mw->buff_size = 0;
                dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",
                        buff_size);
                return -ENOMEM;
        }

        /*
         * We must ensure that the memory address allocated is BAR size
         * aligned in order for the XLAT register to take the value. This
         * is a requirement of the hardware. It is recommended to set up
         * CMA for BAR sizes equal to or greater than 4MB.
         */
        if (!IS_ALIGNED(mw->dma_addr, mw->xlat_align)) {
                dev_err(&pdev->dev, "DMA memory %pad is not aligned\n",
                        &mw->dma_addr);
                ntb_free_mw(nt, num_mw);
                return -ENOMEM;
        }

        /* Notify HW the memory location of the receive buffer */
        rc = ntb_mw_set_trans(nt->ndev, num_mw, mw->dma_addr, mw->xlat_size);
        if (rc) {
                dev_err(&pdev->dev, "Unable to set mw%d translation\n", num_mw);
                ntb_free_mw(nt, num_mw);
                return -EIO;
        }

        return 0;
}

static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
{
        qp->link_is_up = false;
        qp->active = false;

        qp->tx_index = 0;
        qp->rx_index = 0;
        qp->rx_bytes = 0;
        qp->rx_pkts = 0;
        qp->rx_ring_empty = 0;
        qp->rx_err_no_buf = 0;
        qp->rx_err_oflow = 0;
        qp->rx_err_ver = 0;
        qp->rx_memcpy = 0;
        qp->rx_async = 0;
        qp->tx_bytes = 0;
        qp->tx_pkts = 0;
        qp->tx_ring_full = 0;
        qp->tx_err_no_buf = 0;
        qp->tx_memcpy = 0;
        qp->tx_async = 0;
        qp->dma_tx_prep_err = 0;
        qp->dma_rx_prep_err = 0;
}

static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{
        struct ntb_transport_ctx *nt = qp->transport;
        struct pci_dev *pdev = nt->ndev->pdev;

        dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num);

        cancel_delayed_work_sync(&qp->link_work);
        ntb_qp_link_down_reset(qp);

        if (qp->event_handler)
                qp->event_handler(qp->cb_data, qp->link_is_up);
}

static void ntb_qp_link_cleanup_work(struct work_struct *work)
{
        struct ntb_transport_qp *qp = container_of(work,
                                                   struct ntb_transport_qp,
                                                   link_cleanup);
        struct ntb_transport_ctx *nt = qp->transport;

        ntb_qp_link_cleanup(qp);

        if (nt->link_is_up)
                schedule_delayed_work(&qp->link_work,
                                      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
        schedule_work(&qp->link_cleanup);
}

static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
{
        struct ntb_transport_qp *qp;
        u64 qp_bitmap_alloc;
        int i;

        qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

        /* Pass along the info to any clients */
        for (i = 0; i < nt->qp_count; i++)
                if (qp_bitmap_alloc & BIT_ULL(i)) {
                        qp = &nt->qp_vec[i];
                        ntb_qp_link_cleanup(qp);
                        cancel_work_sync(&qp->link_cleanup);
                        cancel_delayed_work_sync(&qp->link_work);
                }

        if (!nt->link_is_up)
                cancel_delayed_work_sync(&nt->link_work);

        /* The scratchpad registers keep their values if the remote side
         * goes down; blast them now to give them a sane value the next
         * time they are accessed.
         */
        for (i = 0; i < MAX_SPAD; i++)
                ntb_spad_write(nt->ndev, i, 0);
}

static void ntb_transport_link_cleanup_work(struct work_struct *work)
{
        struct ntb_transport_ctx *nt =
                container_of(work, struct ntb_transport_ctx, link_cleanup);

        ntb_transport_link_cleanup(nt);
}

static void ntb_transport_event_callback(void *data)
{
        struct ntb_transport_ctx *nt = data;

        if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1)
                schedule_delayed_work(&nt->link_work, 0);
        else
                schedule_work(&nt->link_cleanup);
}

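/*
 * Link-up handshake: publish the local MW sizes, MW/QP counts and
 * transport version through the peer's scratchpads, then read back what
 * the remote side published.  Only when both sides agree are the MWs
 * programmed and the per-QP link work scheduled.
 */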
static void ntb_transport_link_work(struct work_struct *work)
{
        struct ntb_transport_ctx *nt =
                container_of(work, struct ntb_transport_ctx, link_work.work);
        struct ntb_dev *ndev = nt->ndev;
        struct pci_dev *pdev = ndev->pdev;
        resource_size_t size;
        u32 val;
        int rc = 0, i, spad;

        /* send the local info, in the opposite order of the way we read it */
        for (i = 0; i < nt->mw_count; i++) {
                size = nt->mw_vec[i].phys_size;

                if (max_mw_size && size > max_mw_size)
                        size = max_mw_size;

                spad = MW0_SZ_HIGH + (i * 2);
                ntb_peer_spad_write(ndev, spad, upper_32_bits(size));

                spad = MW0_SZ_LOW + (i * 2);
                ntb_peer_spad_write(ndev, spad, lower_32_bits(size));
        }

        ntb_peer_spad_write(ndev, NUM_MWS, nt->mw_count);

        ntb_peer_spad_write(ndev, NUM_QPS, nt->qp_count);

        ntb_peer_spad_write(ndev, VERSION, NTB_TRANSPORT_VERSION);

        /* Query the remote side for its info */
        val = ntb_spad_read(ndev, VERSION);
        dev_dbg(&pdev->dev, "Remote version = %d\n", val);
        if (val != NTB_TRANSPORT_VERSION)
                goto out;

        val = ntb_spad_read(ndev, NUM_QPS);
        dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
        if (val != nt->qp_count)
                goto out;

        val = ntb_spad_read(ndev, NUM_MWS);
        dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
        if (val != nt->mw_count)
                goto out;

        for (i = 0; i < nt->mw_count; i++) {
                u64 val64;

                val = ntb_spad_read(ndev, MW0_SZ_HIGH + (i * 2));
                val64 = (u64)val << 32;

                val = ntb_spad_read(ndev, MW0_SZ_LOW + (i * 2));
                val64 |= val;

                dev_dbg(&pdev->dev, "Remote MW%d size = %#llx\n", i, val64);

                rc = ntb_set_mw(nt, i, val64);
                if (rc)
                        goto out1;
        }

        nt->link_is_up = true;

        for (i = 0; i < nt->qp_count; i++) {
                struct ntb_transport_qp *qp = &nt->qp_vec[i];

                ntb_transport_setup_qp_mw(nt, i);

                if (qp->client_ready)
                        schedule_delayed_work(&qp->link_work, 0);
        }

        return;

out1:
        for (i = 0; i < nt->mw_count; i++)
                ntb_free_mw(nt, i);

        /* if there's an actual failure, we should just bail */
        if (rc < 0) {
                ntb_link_disable(ndev);
                return;
        }

out:
        if (ntb_link_is_up(ndev, NULL, NULL) == 1)
                schedule_delayed_work(&nt->link_work,
                                      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_work(struct work_struct *work)
{
        struct ntb_transport_qp *qp = container_of(work,
                                                   struct ntb_transport_qp,
                                                   link_work.work);
        struct pci_dev *pdev = qp->ndev->pdev;
        struct ntb_transport_ctx *nt = qp->transport;
        int val;

        WARN_ON(!nt->link_is_up);

        val = ntb_spad_read(nt->ndev, QP_LINKS);

        ntb_peer_spad_write(nt->ndev, QP_LINKS, val | BIT(qp->qp_num));

        /* The peer sets its ready bit in our local QP_LINKS spad, which
         * was read into val above; the peer spad readback result itself
         * is unused.
         */
        ntb_peer_spad_read(nt->ndev, QP_LINKS);
        dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val);

        /* See if the remote side is up */
        if (val & BIT(qp->qp_num)) {
                dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
                qp->link_is_up = true;
                qp->active = true;

                if (qp->event_handler)
                        qp->event_handler(qp->cb_data, qp->link_is_up);

                if (qp->active)
                        tasklet_schedule(&qp->rxc_db_work);
        } else if (nt->link_is_up)
                schedule_delayed_work(&qp->link_work,
                                      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

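/*
 * Per-QP handshake: each side sets its ready bit in the peer's QP_LINKS
 * scratchpad and polls its own copy until the matching bit shows up,
 * retrying while the transport link stays up.
 */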
static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
                                    unsigned int qp_num)
{
        struct ntb_transport_qp *qp;
        phys_addr_t mw_base;
        resource_size_t mw_size;
        unsigned int num_qps_mw, tx_size;
        unsigned int mw_num, mw_count, qp_count;
        u64 qp_offset;

        mw_count = nt->mw_count;
        qp_count = nt->qp_count;

        mw_num = QP_TO_MW(nt, qp_num);

        qp = &nt->qp_vec[qp_num];
        qp->qp_num = qp_num;
        qp->transport = nt;
        qp->ndev = nt->ndev;
        qp->client_ready = false;
        qp->event_handler = NULL;
        ntb_qp_link_down_reset(qp);

        /* spread any remainder of qp_count / mw_count over the lower MWs */
        if (mw_num < qp_count % mw_count)
                num_qps_mw = qp_count / mw_count + 1;
        else
                num_qps_mw = qp_count / mw_count;

        mw_base = nt->mw_vec[mw_num].phys_addr;
        mw_size = nt->mw_vec[mw_num].phys_size;

        tx_size = (unsigned int)mw_size / num_qps_mw;
        qp_offset = tx_size * (qp_num / mw_count);

        qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
        if (!qp->tx_mw)
                return -EINVAL;

        qp->tx_mw_phys = mw_base + qp_offset;
        if (!qp->tx_mw_phys)
                return -EINVAL;

        tx_size -= sizeof(struct ntb_rx_info);
        qp->rx_info = qp->tx_mw + tx_size;

        /* Due to housekeeping, there must be at least 2 buffers */
        qp->tx_max_frame = min(transport_mtu, tx_size / 2);
        qp->tx_max_entry = tx_size / qp->tx_max_frame;

        if (nt->debugfs_node_dir) {
                /* big enough for "qp63" plus the terminating NUL */
                char debugfs_name[8];

                snprintf(debugfs_name, sizeof(debugfs_name), "qp%d", qp_num);
                qp->debugfs_dir = debugfs_create_dir(debugfs_name,
                                                     nt->debugfs_node_dir);

                qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
                                                        qp->debugfs_dir, qp,
                                                        &ntb_qp_debugfs_stats);
        } else {
                qp->debugfs_dir = NULL;
                qp->debugfs_stats = NULL;
        }

        INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
        INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);

        spin_lock_init(&qp->ntb_rx_q_lock);
        spin_lock_init(&qp->ntb_tx_free_q_lock);

        INIT_LIST_HEAD(&qp->rx_post_q);
        INIT_LIST_HEAD(&qp->rx_pend_q);
        INIT_LIST_HEAD(&qp->rx_free_q);
        INIT_LIST_HEAD(&qp->tx_free_q);

        tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db,
                     (unsigned long)qp);

        return 0;
}

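/*
 * Device-level probe: map every memory window, derive the QP count from
 * the doorbell valid mask (one doorbell bit per QP, capped by
 * max_num_clients and the MW count), then register the context and
 * enable the link.
 */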
static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
{
        struct ntb_transport_ctx *nt;
        struct ntb_transport_mw *mw;
        unsigned int mw_count, qp_count;
        u64 qp_bitmap;
        int node;
        int rc, i;

        if (ntb_db_is_unsafe(ndev))
                dev_dbg(&ndev->dev,
                        "doorbell is unsafe, proceed anyway...\n");
        if (ntb_spad_is_unsafe(ndev))
                dev_dbg(&ndev->dev,
                        "scratchpad is unsafe, proceed anyway...\n");

        node = dev_to_node(&ndev->dev);

        nt = kzalloc_node(sizeof(*nt), GFP_KERNEL, node);
        if (!nt)
                return -ENOMEM;

        nt->ndev = ndev;

        mw_count = ntb_mw_count(ndev);

        nt->mw_count = mw_count;

        nt->mw_vec = kzalloc_node(mw_count * sizeof(*nt->mw_vec),
                                  GFP_KERNEL, node);
        if (!nt->mw_vec) {
                rc = -ENOMEM;
                goto err;
        }

        for (i = 0; i < mw_count; i++) {
                mw = &nt->mw_vec[i];

                rc = ntb_mw_get_range(ndev, i, &mw->phys_addr, &mw->phys_size,
                                      &mw->xlat_align, &mw->xlat_align_size);
                if (rc)
                        goto err1;

                mw->vbase = ioremap_wc(mw->phys_addr, mw->phys_size);
                if (!mw->vbase) {
                        rc = -ENOMEM;
                        goto err1;
                }

                mw->buff_size = 0;
                mw->xlat_size = 0;
                mw->virt_addr = NULL;
                mw->dma_addr = 0;
        }

        qp_bitmap = ntb_db_valid_mask(ndev);

        qp_count = ilog2(qp_bitmap);
        if (max_num_clients && max_num_clients < qp_count)
                qp_count = max_num_clients;
        else if (mw_count < qp_count)
                qp_count = mw_count;

        qp_bitmap &= BIT_ULL(qp_count) - 1;

        nt->qp_count = qp_count;
        nt->qp_bitmap = qp_bitmap;
        nt->qp_bitmap_free = qp_bitmap;

        nt->qp_vec = kzalloc_node(qp_count * sizeof(*nt->qp_vec),
                                  GFP_KERNEL, node);
        if (!nt->qp_vec) {
                rc = -ENOMEM;
                goto err1;
        }

        if (nt_debugfs_dir) {
                nt->debugfs_node_dir =
                        debugfs_create_dir(pci_name(ndev->pdev),
                                           nt_debugfs_dir);
        }

        for (i = 0; i < qp_count; i++) {
                rc = ntb_transport_init_queue(nt, i);
                if (rc)
                        goto err2;
        }

        INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
        INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);

        rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops);
        if (rc)
                goto err2;

        INIT_LIST_HEAD(&nt->client_devs);
        rc = ntb_bus_init(nt);
        if (rc)
                goto err3;

        nt->link_is_up = false;
        ntb_link_enable(ndev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
        ntb_link_event(ndev);

        return 0;

err3:
        ntb_clear_ctx(ndev);
err2:
        kfree(nt->qp_vec);
err1:
        while (i--) {
                mw = &nt->mw_vec[i];
                iounmap(mw->vbase);
        }
        kfree(nt->mw_vec);
err:
        kfree(nt);
        return rc;
}

static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
{
        struct ntb_transport_ctx *nt = ndev->ctx;
        struct ntb_transport_qp *qp;
        u64 qp_bitmap_alloc;
        int i;

        ntb_transport_link_cleanup(nt);
        cancel_work_sync(&nt->link_cleanup);
        cancel_delayed_work_sync(&nt->link_work);

        qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

        /* verify that all the qps are freed */
        for (i = 0; i < nt->qp_count; i++) {
                qp = &nt->qp_vec[i];
                if (qp_bitmap_alloc & BIT_ULL(i))
                        ntb_transport_free_queue(qp);
                debugfs_remove_recursive(qp->debugfs_dir);
        }

        ntb_link_disable(ndev);
        ntb_clear_ctx(ndev);

        ntb_bus_remove(nt);

        for (i = nt->mw_count; i--; ) {
                ntb_free_mw(nt, i);
                iounmap(nt->mw_vec[i].vbase);
        }

        kfree(nt->qp_vec);
        kfree(nt->mw_vec);
        kfree(nt);
}

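/*
 * Complete received entries in ring order: clear the descriptor flag,
 * advance the peer's flow-control index via rx_info, and invoke the
 * client's rx_handler outside of ntb_rx_q_lock.
 */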
static void ntb_complete_rxc(struct ntb_transport_qp *qp)
{
        struct ntb_queue_entry *entry;
        void *cb_data;
        unsigned int len;
        unsigned long irqflags;

        spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);

        while (!list_empty(&qp->rx_post_q)) {
                entry = list_first_entry(&qp->rx_post_q,
                                         struct ntb_queue_entry, entry);
                if (!(entry->flags & DESC_DONE_FLAG))
                        break;

                entry->rx_hdr->flags = 0;
                iowrite32(entry->index, &qp->rx_info->entry);

                cb_data = entry->cb_data;
                len = entry->len;

                list_move_tail(&entry->entry, &qp->rx_free_q);

                spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);

                if (qp->rx_handler && qp->client_ready)
                        qp->rx_handler(qp, qp->cb_data, cb_data, len);

                spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
        }

        spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
}

static void ntb_rx_copy_callback(void *data)
{
        struct ntb_queue_entry *entry = data;

        entry->flags |= DESC_DONE_FLAG;

        ntb_complete_rxc(entry->qp);
}

static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
{
        void *buf = entry->buf;
        size_t len = entry->len;

        memcpy(buf, offset, len);

        /* Ensure that the data is fully copied out before clearing the flag */
        wmb();

        ntb_rx_copy_callback(entry);
}

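/*
 * Receive fast path: payloads of at least copy_bytes that meet the DMA
 * engine's alignment constraints are copied asynchronously; anything
 * else falls back to a CPU copy via ntb_memcpy_rx().
 */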
static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
{
        struct dma_async_tx_descriptor *txd;
        struct ntb_transport_qp *qp = entry->qp;
        struct dma_chan *chan = qp->rx_dma_chan;
        struct dma_device *device;
        size_t pay_off, buff_off, len;
        struct dmaengine_unmap_data *unmap;
        dma_cookie_t cookie;
        void *buf = entry->buf;
        int retries = 0;

        len = entry->len;

        if (!chan)
                goto err;

        if (len < copy_bytes)
                goto err;

        device = chan->device;
        pay_off = (size_t)offset & ~PAGE_MASK;
        buff_off = (size_t)buf & ~PAGE_MASK;

        if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
                goto err;

        unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
        if (!unmap)
                goto err;

        unmap->len = len;
        unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
                                      pay_off, len, DMA_TO_DEVICE);
        if (dma_mapping_error(device->dev, unmap->addr[0]))
                goto err_get_unmap;

        unmap->to_cnt = 1;

        unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
                                      buff_off, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(device->dev, unmap->addr[1]))
                goto err_get_unmap;

        unmap->from_cnt = 1;

        for (retries = 0; retries < DMA_RETRIES; retries++) {
                txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
                                                     unmap->addr[0], len,
                                                     DMA_PREP_INTERRUPT);
                if (txd)
                        break;

                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(DMA_OUT_RESOURCE_TO);
        }

        if (!txd) {
                qp->dma_rx_prep_err++;
                goto err_get_unmap;
        }

        txd->callback = ntb_rx_copy_callback;
        txd->callback_param = entry;
        dma_set_unmap(txd, unmap);

        cookie = dmaengine_submit(txd);
        if (dma_submit_error(cookie))
                goto err_set_unmap;

        dmaengine_unmap_put(unmap);

        qp->last_cookie = cookie;

        qp->rx_async++;

        return;

err_set_unmap:
        dmaengine_unmap_put(unmap);
err_get_unmap:
        dmaengine_unmap_put(unmap);
err:
        ntb_memcpy_rx(entry, offset);
        qp->rx_memcpy++;
}

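/*
 * Process one slot of the receive ring: validate the payload header
 * written by the peer (done flag, link-down flag, sequence number)
 * before pairing the payload with a posted receive buffer.
 */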
static int ntb_process_rxc(struct ntb_transport_qp *qp)
{
        struct ntb_payload_header *hdr;
        struct ntb_queue_entry *entry;
        void *offset;

        offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
        hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);

        dev_dbg(&qp->ndev->pdev->dev, "qp %d: RX ver %u len %d flags %x\n",
                qp->qp_num, hdr->ver, hdr->len, hdr->flags);

        if (!(hdr->flags & DESC_DONE_FLAG)) {
                dev_dbg(&qp->ndev->pdev->dev, "done flag not set\n");
                qp->rx_ring_empty++;
                return -EAGAIN;
        }

        if (hdr->flags & LINK_DOWN_FLAG) {
                dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n");
                ntb_qp_link_down(qp);
                hdr->flags = 0;
                return -EAGAIN;
        }

        if (hdr->ver != (u32)qp->rx_pkts) {
                dev_dbg(&qp->ndev->pdev->dev,
                        "version mismatch, expected %llu - got %u\n",
                        qp->rx_pkts, hdr->ver);
                qp->rx_err_ver++;
                return -EIO;
        }

        entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
        if (!entry) {
                dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
                qp->rx_err_no_buf++;
                return -EAGAIN;
        }

        entry->rx_hdr = hdr;
        entry->index = qp->rx_index;

        if (hdr->len > entry->len) {
                dev_dbg(&qp->ndev->pdev->dev,
                        "receive buffer overflow! Wanted %d got %d\n",
                        hdr->len, entry->len);
                qp->rx_err_oflow++;

                entry->len = -EIO;
                entry->flags |= DESC_DONE_FLAG;

                ntb_complete_rxc(qp);
        } else {
                dev_dbg(&qp->ndev->pdev->dev,
                        "RX OK index %u ver %u size %d into buf size %d\n",
                        qp->rx_index, hdr->ver, hdr->len, entry->len);

                qp->rx_bytes += hdr->len;
                qp->rx_pkts++;

                entry->len = hdr->len;

                ntb_async_rx(entry, offset);
        }

        qp->rx_index++;
        qp->rx_index %= qp->rx_max_entry;

        return 0;
}

static void ntb_transport_rxc_db(unsigned long data)
{
        struct ntb_transport_qp *qp = (void *)data;
        int rc, i;

        dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n",
                __func__, qp->qp_num);

        /* Limit the number of packets processed in a single interrupt to
         * provide fairness to others
         */
        for (i = 0; i < qp->rx_max_entry; i++) {
                rc = ntb_process_rxc(qp);
                if (rc)
                        break;
        }

        if (i && qp->rx_dma_chan)
                dma_async_issue_pending(qp->rx_dma_chan);

        if (i == qp->rx_max_entry) {
                /* there is more work to do */
                if (qp->active)
                        tasklet_schedule(&qp->rxc_db_work);
        } else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) {
                /* the doorbell bit is set: clear it */
                ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num));
                /* ntb_db_read ensures ntb_db_clear write is committed */
                ntb_db_read(qp->ndev);

                /* an interrupt may have arrived between finishing
                 * ntb_process_rxc and clearing the doorbell bit:
                 * there might be some more work to do.
                 */
                if (qp->active)
                        tasklet_schedule(&qp->rxc_db_work);
        }
}

static void ntb_tx_copy_callback(void *data)
{
        struct ntb_queue_entry *entry = data;
        struct ntb_transport_qp *qp = entry->qp;
        struct ntb_payload_header __iomem *hdr = entry->tx_hdr;

        iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);

        ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));

        /* The entry length can only be zero if the packet is intended to be a
         * "link down" or similar.  Since no payload is being sent in these
         * cases, there is nothing to add to the completion queue.
         */
        if (entry->len > 0) {
                qp->tx_bytes += entry->len;

                if (qp->tx_handler)
                        qp->tx_handler(qp, qp->cb_data, entry->cb_data,
                                       entry->len);
        }

        ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
}

static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
{
#ifdef ARCH_HAS_NOCACHE_UACCESS
        /*
         * Using non-temporal mov to improve performance on non-cached
         * writes, even though we aren't actually copying from user space.
         */
        __copy_from_user_inatomic_nocache(offset, entry->buf, entry->len);
#else
        memcpy_toio(offset, entry->buf, entry->len);
#endif

        /* Ensure that the data is fully copied out before setting the flags */
        wmb();

        ntb_tx_copy_callback(entry);
}

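/*
 * Transmit fast path: the payload header is always written through the
 * mapped MW with iowrite32(); the payload itself goes by DMA when it is
 * large and aligned enough, by memcpy_toio() otherwise.
 */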
1508 static void ntb_async_tx(struct ntb_transport_qp *qp,
1509                          struct ntb_queue_entry *entry)
1510 {
1511         struct ntb_payload_header __iomem *hdr;
1512         struct dma_async_tx_descriptor *txd;
1513         struct dma_chan *chan = qp->tx_dma_chan;
1514         struct dma_device *device;
1515         size_t dest_off, buff_off;
1516         struct dmaengine_unmap_data *unmap;
1517         dma_addr_t dest;
1518         dma_cookie_t cookie;
1519         void __iomem *offset;
1520         size_t len = entry->len;
1521         void *buf = entry->buf;
1522         int retries = 0;
1523
1524         offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
1525         hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
1526         entry->tx_hdr = hdr;
1527
1528         iowrite32(entry->len, &hdr->len);
1529         iowrite32((u32)qp->tx_pkts, &hdr->ver);
1530
1531         if (!chan)
1532                 goto err;
1533
1534         if (len < copy_bytes)
1535                 goto err;
1536
1537         device = chan->device;
1538         dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
1539         buff_off = (size_t)buf & ~PAGE_MASK;
1540         dest_off = (size_t)dest & ~PAGE_MASK;
1541
1542         if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
1543                 goto err;
1544
1545         unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
1546         if (!unmap)
1547                 goto err;
1548
1549         unmap->len = len;
1550         unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
1551                                       buff_off, len, DMA_TO_DEVICE);
1552         if (dma_mapping_error(device->dev, unmap->addr[0]))
1553                 goto err_get_unmap;
1554
1555         unmap->to_cnt = 1;
1556
1557         for (retries = 0; retries < DMA_RETRIES; retries++) {
1558                 txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0],
1559                                                      len, DMA_PREP_INTERRUPT);
1560                 if (txd)
1561                         break;
1562
1563                 set_current_state(TASK_INTERRUPTIBLE);
1564                 schedule_timeout(DMA_OUT_RESOURCE_TO);
1565         }
1566
1567         if (!txd) {
1568                 qp->dma_tx_prep_err++;
1569                 goto err_get_unmap;
1570         }
1571
1572         txd->callback = ntb_tx_copy_callback;
1573         txd->callback_param = entry;
1574         dma_set_unmap(txd, unmap);
1575
1576         cookie = dmaengine_submit(txd);
1577         if (dma_submit_error(cookie))
1578                 goto err_set_unmap;
1579
1580         dmaengine_unmap_put(unmap);
1581
1582         dma_async_issue_pending(chan);
1583         qp->tx_async++;
1584
1585         return;
1586 err_set_unmap:
1587         dmaengine_unmap_put(unmap);
1588 err_get_unmap:
1589         dmaengine_unmap_put(unmap);
1590 err:
1591         ntb_memcpy_tx(entry, offset);
1592         qp->tx_memcpy++;
1593 }
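
/*
 * Note the fallback behaviour above: any failure in the DMA path (no
 * channel, a transfer shorter than copy_bytes, misaligned buffers, or a
 * descriptor preparation/submit error) falls through to ntb_memcpy_tx(),
 * so a send degrades to a CPU copy rather than failing outright.
 */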

static int ntb_process_tx(struct ntb_transport_qp *qp,
			  struct ntb_queue_entry *entry)
{
	if (qp->tx_index == qp->remote_rx_info->entry) {
		qp->tx_ring_full++;
		return -EAGAIN;
	}

	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, NULL, -EIO);

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
		return 0;
	}

	ntb_async_tx(qp, entry);

	qp->tx_index++;
	qp->tx_index %= qp->tx_max_entry;

	qp->tx_pkts++;

	return 0;
}
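
/*
 * Worked example of the ring bookkeeping above (illustrative, with made-up
 * numbers): with tx_max_entry = 4, the ring is treated as full when
 * tx_index catches up with remote_rx_info->entry, e.g. both equal to 3,
 * and ntb_process_tx() returns -EAGAIN; after a successful send at
 * tx_index = 3 the index wraps to (3 + 1) % 4 = 0.
 */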

static void ntb_send_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev = qp->ndev->pdev;
	struct ntb_queue_entry *entry;
	int i, rc;

	if (!qp->link_is_up)
		return;

	dev_info(&pdev->dev, "qp %d: Send Link Down\n", qp->qp_num);

	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
		if (entry)
			break;
		msleep(100);
	}

	if (!entry)
		return;

	entry->cb_data = NULL;
	entry->buf = NULL;
	entry->len = 0;
	entry->flags = LINK_DOWN_FLAG;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
			qp->qp_num);

	ntb_qp_link_down_reset(qp);
}

static bool ntb_dma_filter_fn(struct dma_chan *chan, void *node)
{
	return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
}

/**
 * ntb_transport_create_queue - Create a new NTB transport layer queue
 * @data: pointer for callback data
 * @client_dev: &struct device pointer of the NTB client
 * @handlers: pointer to the struct of receive, transmit, and event callbacks
 *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive.  The receive callback routine will be
 * used to pass up data when the transport has received it on the queue.  The
 * transmit callback routine will be called when the transport has completed the
 * transmission of the data on the queue and the data is ready to be freed.
 *
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
 */
struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct device *client_dev,
			   const struct ntb_queue_handlers *handlers)
{
	struct ntb_dev *ndev;
	struct pci_dev *pdev;
	struct ntb_transport_ctx *nt;
	struct ntb_queue_entry *entry;
	struct ntb_transport_qp *qp;
	u64 qp_bit;
	unsigned int free_queue;
	dma_cap_mask_t dma_mask;
	int node;
	int i;

	ndev = dev_ntb(client_dev->parent);
	pdev = ndev->pdev;
	nt = ndev->ctx;

	node = dev_to_node(&ndev->dev);

	free_queue = ffs(nt->qp_bitmap_free);
	if (!free_queue)
		goto err;

	/* decrement free_queue to make it zero based */
	free_queue--;

	qp = &nt->qp_vec[free_queue];
	qp_bit = BIT_ULL(qp->qp_num);

	nt->qp_bitmap_free &= ~qp_bit;

	qp->cb_data = data;
	qp->rx_handler = handlers->rx_handler;
	qp->tx_handler = handlers->tx_handler;
	qp->event_handler = handlers->event_handler;

	dma_cap_zero(dma_mask);
	dma_cap_set(DMA_MEMCPY, dma_mask);

	if (use_dma) {
		qp->tx_dma_chan =
			dma_request_channel(dma_mask, ntb_dma_filter_fn,
					    (void *)(unsigned long)node);
		if (!qp->tx_dma_chan)
			dev_info(&pdev->dev, "Unable to allocate TX DMA channel\n");

		qp->rx_dma_chan =
			dma_request_channel(dma_mask, ntb_dma_filter_fn,
					    (void *)(unsigned long)node);
		if (!qp->rx_dma_chan)
			dev_info(&pdev->dev, "Unable to allocate RX DMA channel\n");
	} else {
		qp->tx_dma_chan = NULL;
		qp->rx_dma_chan = NULL;
	}

	dev_dbg(&pdev->dev, "Using %s memcpy for TX\n",
		qp->tx_dma_chan ? "DMA" : "CPU");

	dev_dbg(&pdev->dev, "Using %s memcpy for RX\n",
		qp->rx_dma_chan ? "DMA" : "CPU");

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
		if (!entry)
			goto err1;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
			     &qp->rx_free_q);
	}
	qp->rx_alloc_entry = NTB_QP_DEF_NUM_ENTRIES;

	for (i = 0; i < qp->tx_max_entry; i++) {
		entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
		if (!entry)
			goto err2;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
	}

	ntb_db_clear(qp->ndev, qp_bit);
	ntb_db_clear_mask(qp->ndev, qp_bit);

	dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);

	return qp;

err2:
	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);
err1:
	qp->rx_alloc_entry = 0;
	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
		kfree(entry);
	if (qp->tx_dma_chan)
		dma_release_channel(qp->tx_dma_chan);
	if (qp->rx_dma_chan)
		dma_release_channel(qp->rx_dma_chan);
	nt->qp_bitmap_free |= qp_bit;
err:
	return NULL;
}
EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
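
/*
 * Example usage (an illustrative sketch, not taken from this file): a client
 * might create a queue from its probe routine.  my_rx, my_tx, my_event and
 * my_priv are hypothetical client-side names; the handler signatures follow
 * struct ntb_queue_handlers from linux/ntb_transport.h.
 *
 *	static void my_rx(struct ntb_transport_qp *qp, void *qp_data,
 *			  void *data, int len)
 *	{
 *		// hand the received buffer up the client's stack
 *	}
 *
 *	static void my_tx(struct ntb_transport_qp *qp, void *qp_data,
 *			  void *data, int len)
 *	{
 *		// the transmit buffer may be freed or reused here
 *	}
 *
 *	static void my_event(void *data, int status)
 *	{
 *		// react to link up/down transitions
 *	}
 *
 *	static const struct ntb_queue_handlers my_handlers = {
 *		.rx_handler	= my_rx,
 *		.tx_handler	= my_tx,
 *		.event_handler	= my_event,
 *	};
 *
 *	qp = ntb_transport_create_queue(my_priv, client_dev, &my_handlers);
 *	if (!qp)
 *		return -ENOMEM;
 */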

/**
 * ntb_transport_free_queue - Frees NTB transport queue
 * @qp: NTB queue to be freed
 *
 * Frees NTB transport queue
 */
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	struct ntb_queue_entry *entry;
	u64 qp_bit;

	if (!qp)
		return;

	pdev = qp->ndev->pdev;

	qp->active = false;

	if (qp->tx_dma_chan) {
		struct dma_chan *chan = qp->tx_dma_chan;
		/* Setting the dma_chan to NULL will force any new traffic to be
		 * processed by the CPU instead of the DMA engine
		 */
		qp->tx_dma_chan = NULL;

		/* Try to be nice and wait for any queued DMA engine
		 * transactions to process before smashing it with a rock
		 */
		dma_sync_wait(chan, qp->last_cookie);
		dmaengine_terminate_all(chan);
		dma_release_channel(chan);
	}

	if (qp->rx_dma_chan) {
		struct dma_chan *chan = qp->rx_dma_chan;
		/* Setting the dma_chan to NULL will force any new traffic to be
		 * processed by the CPU instead of the DMA engine
		 */
		qp->rx_dma_chan = NULL;

		/* Try to be nice and wait for any queued DMA engine
		 * transactions to process before smashing it with a rock
		 */
		dma_sync_wait(chan, qp->last_cookie);
		dmaengine_terminate_all(chan);
		dma_release_channel(chan);
	}

	qp_bit = BIT_ULL(qp->qp_num);

	ntb_db_set_mask(qp->ndev, qp_bit);
	tasklet_kill(&qp->rxc_db_work);

	cancel_delayed_work_sync(&qp->link_work);

	qp->cb_data = NULL;
	qp->rx_handler = NULL;
	qp->tx_handler = NULL;
	qp->event_handler = NULL;

	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
		kfree(entry);

	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
		dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");
		kfree(entry);
	}

	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
		dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");
		kfree(entry);
	}

	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);

	qp->transport->qp_bitmap_free |= qp_bit;

	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
}
EXPORT_SYMBOL_GPL(ntb_transport_free_queue);

/**
 * ntb_transport_rx_remove - Dequeues an enqueued rx packet
 * @qp: NTB transport layer queue the entry is to be removed from
 * @len: pointer to variable to write the enqueued buffer's length to
 *
 * Dequeues unused buffers from the receive queue.  Should only be used during
 * shutdown of the qp.
 *
 * RETURNS: a pointer to the dequeued buffer on success, or NULL on error.
 */
void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
{
	struct ntb_queue_entry *entry;
	void *buf;

	if (!qp || qp->client_ready)
		return NULL;

	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
	if (!entry)
		return NULL;

	buf = entry->cb_data;
	*len = entry->len;

	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);

	return buf;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);
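
/*
 * Example (an illustrative sketch): after ntb_transport_link_down() has
 * cleared client_ready, a client's shutdown path can reclaim the buffers it
 * posted earlier.  my_free_buffer is a hypothetical client helper.
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	while ((buf = ntb_transport_rx_remove(qp, &len)))
 *		my_free_buffer(buf);
 */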

/**
 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that incoming packets will be copied into
 * @len: length of the data buffer
 *
 * Enqueue a new receive buffer onto the transport queue, into which an NTB
 * payload can be received.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;

	if (!qp)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
	if (!entry)
		return -ENOMEM;

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;

	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);

	if (qp->active)
		tasklet_schedule(&qp->rxc_db_work);

	return 0;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
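
/*
 * Example (an illustrative sketch): posting one freshly allocated receive
 * buffer, using the buffer pointer itself as the per-buffer callback cookie
 * so it can be recovered from the rx_handler or ntb_transport_rx_remove().
 *
 *	unsigned int size = ntb_transport_max_size(qp);
 *	void *buf = kmalloc(size, GFP_KERNEL);
 *	int rc;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *
 *	rc = ntb_transport_rx_enqueue(qp, buf, buf, size);
 *	if (rc)
 *		kfree(buf);
 */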

/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which an NTB
 * payload will be transmitted.  This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;
	int rc;

	if (!qp || !qp->link_is_up || !len)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
	if (!entry) {
		qp->tx_err_no_buf++;
		return -EBUSY;
	}

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
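
/*
 * Example (an illustrative sketch): a client send path might treat a full
 * ring (-EAGAIN) or exhausted entries (-EBUSY) as backpressure and pause
 * its queue.  my_stop_queue is a hypothetical flow-control hook.
 *
 *	rc = ntb_transport_tx_enqueue(qp, buf, buf, len);
 *	if (rc == -EAGAIN || rc == -EBUSY) {
 *		my_stop_queue();
 *		return rc;
 *	}
 */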

/**
 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
 */
void ntb_transport_link_up(struct ntb_transport_qp *qp)
{
	if (!qp)
		return;

	qp->client_ready = true;

	if (qp->transport->link_is_up)
		schedule_delayed_work(&qp->link_work, 0);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_up);

/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * transport queue specified.  It is the client's responsibility to ensure all
 * entries on queue are purged or otherwise handled appropriately.
 */
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
	int val;

	if (!qp)
		return;

	qp->client_ready = false;

	val = ntb_spad_read(qp->ndev, QP_LINKS);

	ntb_peer_spad_write(qp->ndev, QP_LINKS,
			    val & ~BIT(qp->qp_num));

	if (qp->link_is_up)
		ntb_send_link_down(qp);
	else
		cancel_delayed_work_sync(&qp->link_work);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_down);
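
/*
 * Lifecycle sketch (illustrative): the calls above are expected to pair up
 * across a client's setup and teardown paths, e.g.:
 *
 *	setup:		qp = ntb_transport_create_queue(...);
 *			post buffers with ntb_transport_rx_enqueue();
 *			ntb_transport_link_up(qp);
 *
 *	teardown:	ntb_transport_link_down(qp);
 *			drain buffers with ntb_transport_rx_remove();
 *			ntb_transport_free_queue(qp);
 */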

/**
 * ntb_transport_link_query - Query transport link state
 * @qp: NTB transport layer queue to be queried
 *
 * Query connectivity to the remote system of the NTB transport queue
 *
 * RETURNS: true for link up or false for link down
 */
bool ntb_transport_link_query(struct ntb_transport_qp *qp)
{
	if (!qp)
		return false;

	return qp->link_is_up;
}
EXPORT_SYMBOL_GPL(ntb_transport_link_query);

/**
 * ntb_transport_qp_num - Query the qp number
 * @qp: NTB transport layer queue to be queried
 *
 * Query qp number of the NTB transport queue
 *
 * RETURNS: a zero-based number specifying the qp number
 */
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
{
	if (!qp)
		return 0;

	return qp->qp_num;
}
EXPORT_SYMBOL_GPL(ntb_transport_qp_num);

/**
 * ntb_transport_max_size - Query the max payload size of a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the maximum payload size permissible on the given qp
 *
 * RETURNS: the max payload size of a qp
 */
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
{
	unsigned int max_size;
	unsigned int copy_align;
	struct dma_chan *rx_chan, *tx_chan;

	if (!qp)
		return 0;

	rx_chan = qp->rx_dma_chan;
	tx_chan = qp->tx_dma_chan;

	copy_align = max(rx_chan ? rx_chan->device->copy_align : 0,
			 tx_chan ? tx_chan->device->copy_align : 0);

	/* If DMA engine usage is possible, try to find the max size for that */
	max_size = qp->tx_max_frame - sizeof(struct ntb_payload_header);
	max_size = round_down(max_size, 1 << copy_align);

	return max_size;
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);
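
/*
 * Worked example (illustrative, with assumed numbers): copy_align is a
 * power-of-two exponent, so a DMA device advertising copy_align = 6 needs
 * 64-byte alignment.  With tx_max_frame = 0x10000 and a 12-byte payload
 * header, max_size = round_down(65536 - 12, 64) = 65472.
 */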

unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
{
	unsigned int head = qp->tx_index;
	unsigned int tail = qp->remote_rx_info->entry;

	return tail > head ? tail - head : qp->tx_max_entry + tail - head;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_free_entry);
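
/*
 * Worked example (illustrative, with made-up numbers): with
 * tx_max_entry = 8, head = 6 and tail = 2, there are 8 + 2 - 6 = 4 free
 * entries; with head = 1 and tail = 5 there are simply 5 - 1 = 4.
 */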

static void ntb_transport_doorbell_callback(void *data, int vector)
{
	struct ntb_transport_ctx *nt = data;
	struct ntb_transport_qp *qp;
	u64 db_bits;
	unsigned int qp_num;

	db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free &
		   ntb_db_vector_mask(nt->ndev, vector));

	while (db_bits) {
		qp_num = __ffs(db_bits);
		qp = &nt->qp_vec[qp_num];

		if (qp->active)
			tasklet_schedule(&qp->rxc_db_work);

		db_bits &= ~BIT_ULL(qp_num);
	}
}

static const struct ntb_ctx_ops ntb_transport_ops = {
	.link_event = ntb_transport_event_callback,
	.db_event = ntb_transport_doorbell_callback,
};

static struct ntb_client ntb_transport_client = {
	.ops = {
		.probe = ntb_transport_probe,
		.remove = ntb_transport_free,
	},
};

static int __init ntb_transport_init(void)
{
	int rc;

	pr_info("%s, version %s\n", NTB_TRANSPORT_DESC, NTB_TRANSPORT_VER);

	if (debugfs_initialized())
		nt_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	rc = bus_register(&ntb_transport_bus);
	if (rc)
		goto err_bus;

	rc = ntb_register_client(&ntb_transport_client);
	if (rc)
		goto err_client;

	return 0;

err_client:
	bus_unregister(&ntb_transport_bus);
err_bus:
	debugfs_remove_recursive(nt_debugfs_dir);
	return rc;
}
module_init(ntb_transport_init);

static void __exit ntb_transport_exit(void)
{
	debugfs_remove_recursive(nt_debugfs_dir);

	ntb_unregister_client(&ntb_transport_client);
	bus_unregister(&ntb_transport_bus);
}
module_exit(ntb_transport_exit);