1 /* QLogic qedr NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/dma-mapping.h>
33 #include <linux/crc32.h>
37 #include <linux/iommu.h>
39 #include <rdma/ib_verbs.h>
40 #include <rdma/ib_user_verbs.h>
41 #include <rdma/iw_cm.h>
42 #include <rdma/ib_umem.h>
43 #include <rdma/ib_addr.h>
44 #include <rdma/ib_cache.h>
47 #include <linux/qed/qed_if.h>
50 #include <rdma/qedr-abi.h>
/*
 * qedr_query_gid() - report the GID at @index of @port into *sgid.
 *
 * NOTE(review): this extract is missing source lines (embedded numbering
 * jumps 52 -> 55 -> 58 ...); the signature tail, opening brace, return
 * paths and closing brace are not visible here.
 */
52 int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
55 struct qedr_dev *dev = get_qedr_dev(ibdev);
/* RoCE GIDs live in the core's cached GID table; ports without such a
 * table are rejected (the rejection path itself is not visible here). */
58 if (!rdma_cap_roce_gid_table(ibdev, port))
61 rc = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
/* presumably taken when the cache lookup fails: hand back the all-zero
 * GID instead -- TODO confirm against the missing branch structure */
63 memcpy(sgid, &zgid, sizeof(*sgid));
67 DP_DEBUG(dev, QEDR_MSG_INIT, "query gid: index=%d %llx:%llx\n", index,
68 sgid->global.interface_id, sgid->global.subnet_prefix);
/*
 * qedr_add_gid() - GID-table "add" callback for the IB core.
 * Only the parameter-sanity checks are visible in this extract; the
 * actual add logic and return values are on lines missing here.
 */
73 int qedr_add_gid(struct ib_device *device, u8 port_num,
74 unsigned int index, const union ib_gid *gid,
75 const struct ib_gid_attr *attr, void **context)
/* Reject devices/ports that have no RoCE GID table. */
77 if (!rdma_cap_roce_gid_table(device, port_num))
/* Port numbers are 1-based, hence '>' rather than '>='. */
80 if (port_num > QEDR_MAX_PORT)
/*
 * qedr_del_gid() - GID-table "delete" callback; mirrors the sanity
 * checks of qedr_add_gid(). Remaining body lines missing from extract.
 */
89 int qedr_del_gid(struct ib_device *device, u8 port_num,
90 unsigned int index, void **context)
/* Reject devices/ports that have no RoCE GID table. */
92 if (!rdma_cap_roce_gid_table(device, port_num))
/* Port numbers are 1-based, hence '>' rather than '>='. */
95 if (port_num > QEDR_MAX_PORT)
/*
 * qedr_query_device() - fill the IB core's ib_device_attr from the
 * driver's cached capability snapshot (dev->attr); no hardware access.
 *
 * NOTE(review): extract is incomplete (embedded line numbers skip); the
 * DP_ERR wrapper, error return and final return are not visible here.
 */
104 int qedr_query_device(struct ib_device *ibdev,
105 struct ib_device_attr *attr, struct ib_udata *udata)
107 struct qedr_dev *dev = get_qedr_dev(ibdev);
108 struct qedr_device_attr *qattr = &dev->attr;
/* RDMA context must have been set up by probe before querying. */
110 if (!dev->rdma_ctx) {
112 "qedr_query_device called with invalid params rdma_ctx=%p\n",
/* Start from a clean slate so unsupported fields read as zero. */
117 memset(attr, 0, sizeof(*attr));
119 attr->fw_ver = qattr->fw_ver;
120 attr->sys_image_guid = qattr->sys_image_guid;
121 attr->max_mr_size = qattr->max_mr_size;
122 attr->page_size_cap = qattr->page_size_caps;
123 attr->vendor_id = qattr->vendor_id;
124 attr->vendor_part_id = qattr->vendor_part_id;
125 attr->hw_ver = qattr->hw_ver;
126 attr->max_qp = qattr->max_qp;
/* A QP's WR limit is bounded by whichever of SQ/RQ depth is larger. */
127 attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
128 attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
129 IB_DEVICE_RC_RNR_NAK_GEN |
130 IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
132 attr->max_sge = qattr->max_sge;
133 attr->max_sge_rd = qattr->max_sge;
134 attr->max_cq = qattr->max_cq;
135 attr->max_cqe = qattr->max_cqe;
136 attr->max_mr = qattr->max_mr;
137 attr->max_mw = qattr->max_mw;
138 attr->max_pd = qattr->max_pd;
139 attr->atomic_cap = dev->atomic_cap;
140 attr->max_fmr = qattr->max_fmr;
/* Fixed FMR map count -- driver-chosen constant, not a HW query. */
141 attr->max_map_per_fmr = 16;
/* Round the initiator RD-atomic resource count down to a power of 2. */
142 attr->max_qp_init_rd_atom =
143 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
/* Responder side likewise, but never exceed the initiator limit. */
144 attr->max_qp_rd_atom =
145 min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
146 attr->max_qp_init_rd_atom);
148 attr->max_srq = qattr->max_srq;
149 attr->max_srq_sge = qattr->max_srq_sge;
150 attr->max_srq_wr = qattr->max_srq_wr;
152 attr->local_ca_ack_delay = qattr->dev_ack_delay;
/* presumably a conservative reservation: 1/8 of MRs usable for fast-reg
 * page lists -- TODO confirm rationale */
153 attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
154 attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
155 attr->max_ah = qattr->max_ah;
/* Encodings for ib_port_attr.active_speed (match the IB speed values
 * the core expects: SDR=1, DDR=2, QDR=4, FDR10=8, FDR=16, EDR=32). */
160 #define QEDR_SPEED_SDR (1)
161 #define QEDR_SPEED_DDR (2)
162 #define QEDR_SPEED_QDR (4)
163 #define QEDR_SPEED_FDR10 (8)
164 #define QEDR_SPEED_FDR (16)
165 #define QEDR_SPEED_EDR (32)
/*
 * get_link_speed_and_width() - translate an Ethernet link speed into an
 * (IB speed, IB width) pair for ib_port_attr.
 *
 * NOTE(review): the switch/case labels selecting each pair are on lines
 * missing from this extract; only the assignment bodies are visible.
 * The speed each pair corresponds to must be confirmed upstream.
 */
167 static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
172 *ib_speed = QEDR_SPEED_SDR;
173 *ib_width = IB_WIDTH_1X;
176 *ib_speed = QEDR_SPEED_QDR;
177 *ib_width = IB_WIDTH_1X;
181 *ib_speed = QEDR_SPEED_DDR;
182 *ib_width = IB_WIDTH_4X;
186 *ib_speed = QEDR_SPEED_EDR;
187 *ib_width = IB_WIDTH_1X;
191 *ib_speed = QEDR_SPEED_QDR;
192 *ib_width = IB_WIDTH_4X;
196 *ib_speed = QEDR_SPEED_QDR;
197 *ib_width = IB_WIDTH_4X;
201 *ib_speed = QEDR_SPEED_EDR;
202 *ib_width = IB_WIDTH_4X;
/* presumably the default/unknown-speed fallback: slowest valid pair. */
207 *ib_speed = QEDR_SPEED_SDR;
208 *ib_width = IB_WIDTH_1X;
/*
 * qedr_query_port() - fill ib_port_attr for @port from the qed core's
 * rdma_query_port() snapshot plus netdev state.
 *
 * NOTE(review): extract is incomplete; the port-range check around the
 * DP_ERR, error returns and the trailing return are not visible here.
 */
212 int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
214 struct qedr_dev *dev;
215 struct qed_rdma_port *rdma_port;
217 dev = get_qedr_dev(ibdev);
219 DP_ERR(dev, "invalid_port=0x%x\n", port);
/* Cannot query the qed core before the RDMA context exists. */
223 if (!dev->rdma_ctx) {
224 DP_ERR(dev, "rdma_ctx is NULL\n");
228 rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
229 memset(attr, 0, sizeof(*attr));
/* phys_state 5 = LinkUp, 3 = Disabled (IB spec physical port states). */
231 if (rdma_port->port_state == QED_RDMA_PORT_UP) {
232 attr->state = IB_PORT_ACTIVE;
233 attr->phys_state = 5;
235 attr->state = IB_PORT_DOWN;
236 attr->phys_state = 3;
238 attr->max_mtu = IB_MTU_4096;
/* Active MTU tracks the underlying Ethernet netdev's MTU. */
239 attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
244 attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
245 attr->gid_tbl_len = QEDR_MAX_SGID;
246 attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
247 attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
248 attr->qkey_viol_cntr = 0;
249 get_link_speed_and_width(rdma_port->link_speed,
250 &attr->active_speed, &attr->active_width);
251 attr->max_msg_sz = rdma_port->max_msg_size;
252 attr->max_vl_num = 4;
/*
 * qedr_modify_port() - IB core modify_port callback. Only the port
 * validation is visible in this extract; presumably a no-op otherwise
 * -- TODO confirm against the missing body lines.
 */
257 int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
258 struct ib_port_modify *props)
260 struct qedr_dev *dev;
262 dev = get_qedr_dev(ibdev);
264 DP_ERR(dev, "invalid_port=0x%x\n", port);
/*
 * qedr_add_mmap() - remember a (phys addr, len) range that a user
 * context is allowed to mmap later; qedr_mmap() validates requests
 * against this list.
 *
 * NOTE(review): extract is incomplete; the NULL-check on the allocation
 * and the return statement are on lines missing here.
 */
271 static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
276 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
280 mm->key.phy_addr = phy_addr;
281 /* This function might be called with a length which is not a multiple
282 * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
283 * forces this granularity by increasing the requested size if needed.
284 * When qedr_mmap is called, it will search the list with the updated
285 * length as a key. To prevent search failures, the length is rounded up
286 * in advance to PAGE_SIZE.
288 mm->key.len = roundup(len, PAGE_SIZE);
289 INIT_LIST_HEAD(&mm->entry);
/* mm_list_lock serializes list mutation against qedr_search_mmap(). */
291 mutex_lock(&uctx->mm_list_lock);
292 list_add(&mm->entry, &uctx->mm_head);
293 mutex_unlock(&uctx->mm_list_lock);
295 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
296 "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
297 (unsigned long long)mm->key.phy_addr,
298 (unsigned long)mm->key.len, uctx);
/*
 * qedr_search_mmap() - check whether (phy_addr, len) was registered for
 * this user context via qedr_add_mmap(); returns true on a match.
 *
 * NOTE(review): extract is incomplete; the 'found' flag handling and
 * loop exit are on lines missing here.
 */
303 static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
309 mutex_lock(&uctx->mm_list_lock);
310 list_for_each_entry(mm, &uctx->mm_head, entry) {
/* Both length and address must match exactly (len was pre-rounded to
 * PAGE_SIZE by qedr_add_mmap). */
311 if (len != mm->key.len || phy_addr != mm->key.phy_addr)
317 mutex_unlock(&uctx->mm_list_lock);
318 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
319 "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
320 mm->key.phy_addr, mm->key.len, uctx, found);
/*
 * qedr_alloc_ucontext() - create a per-process RDMA user context:
 * acquire a DPI (doorbell page) from the qed core, report the limits
 * and doorbell window to userspace via udata, and whitelist the
 * doorbell range for a later mmap().
 *
 * NOTE(review): extract is incomplete; udata NULL-check, error unwind
 * labels (kfree/remove_user on failure) and gotos are not visible here.
 */
325 struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
326 struct ib_udata *udata)
329 struct qedr_ucontext *ctx;
330 struct qedr_alloc_ucontext_resp uresp;
331 struct qedr_dev *dev = get_qedr_dev(ibdev);
332 struct qed_rdma_add_user_out_params oparams;
335 return ERR_PTR(-EFAULT);
337 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
339 return ERR_PTR(-ENOMEM);
/* Ask the qed core for a DPI: a doorbell window for this user. */
341 rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
344 "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
349 ctx->dpi = oparams.dpi;
350 ctx->dpi_addr = oparams.dpi_addr;
351 ctx->dpi_phys_addr = oparams.dpi_phys_addr;
352 ctx->dpi_size = oparams.dpi_size;
353 INIT_LIST_HEAD(&ctx->mm_head);
354 mutex_init(&ctx->mm_list_lock);
/* Build the response userspace uses to size its queues and map the
 * doorbell window. */
356 memset(&uresp, 0, sizeof(uresp));
358 uresp.db_pa = ctx->dpi_phys_addr;
359 uresp.db_size = ctx->dpi_size;
360 uresp.max_send_wr = dev->attr.max_sqe;
361 uresp.max_recv_wr = dev->attr.max_rqe;
362 uresp.max_srq_wr = dev->attr.max_srq_wr;
363 uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
364 uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
365 uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
366 uresp.max_cqes = QEDR_MAX_CQES;
368 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
/* Whitelist the DPI range so qedr_mmap() will accept it. */
374 rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
378 DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
380 return &ctx->ibucontext;
/*
 * qedr_dealloc_ucontext() - tear down a user context: release its DPI
 * back to the qed core and free every registered mmap range.
 *
 * NOTE(review): extract is incomplete; the kfree of each mm entry and
 * of uctx itself, and the return, are on lines missing here.
 */
387 int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
389 struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
390 struct qedr_mm *mm, *tmp;
393 DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
395 uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
/* _safe variant: entries are unlinked (and presumably freed) inside
 * the loop. */
397 list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
398 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
399 "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
400 mm->key.phy_addr, mm->key.len, uctx);
401 list_del(&mm->entry);
/*
 * qedr_mmap() - map a previously whitelisted physical range into the
 * calling process: either the doorbell BAR (write-combined, write-only,
 * one page) or a queue-chain buffer (plain remap of the whole range).
 *
 * NOTE(review): extract is incomplete; the error returns after each
 * DP_ERR, the else of the doorbell/chain split, and the final return
 * are on lines missing here.
 */
409 int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
411 struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
412 struct qedr_dev *dev = get_qedr_dev(context->device);
/* vm_pgoff carries the physical page frame the user asked for. */
413 unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
414 u64 unmapped_db = dev->db_phys_addr;
415 unsigned long len = (vma->vm_end - vma->vm_start);
419 DP_DEBUG(dev, QEDR_MSG_INIT,
420 "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
421 vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
422 if (vma->vm_start & (PAGE_SIZE - 1)) {
423 DP_ERR(dev, "Vma_start not page aligned = %ld\n",
/* Only ranges registered via qedr_add_mmap() may be mapped. */
428 found = qedr_search_mmap(ucontext, vm_page, len);
430 DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
435 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
/* Target falls inside the doorbell BAR window -> doorbell mapping. */
437 if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
439 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
/* Doorbells are write-only from userspace; refuse readable mappings. */
440 if (vma->vm_flags & VM_READ) {
441 DP_ERR(dev, "Trying to map doorbell bar for read\n");
/* Write-combining lets doorbell writes post efficiently over PCIe. */
445 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
447 rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
448 PAGE_SIZE, vma->vm_page_prot);
/* Otherwise: map a queue-chain buffer with default protections. */
450 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
451 rc = remap_pfn_range(vma, vma->vm_start,
452 vma->vm_pgoff, len, vma->vm_page_prot);
454 DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);