/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_user.h"
#include "hns_roce_common.h"
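
/*
 * Completion queue (CQ) management for the hns RoCE driver.
 *
 * A CQ is created in four steps: allocate and map the CQE buffer (a kernel
 * buffer or pinned user memory), write its pages into the MTT, reserve a
 * CQN and a CQC entry from the HEM table, then post a SW2HW_CQ mailbox
 * command so the hardware takes ownership of the context. Destruction
 * reverses the sequence, starting with HW2SW_CQ.
 */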

static void hns_roce_ib_cq_comp(struct hns_roce_cq *hr_cq)
{
	struct ib_cq *ibcq = &hr_cq->ib_cq;

	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
				 enum hns_roce_event event_type)
{
	struct hns_roce_dev *hr_dev;
	struct ib_event event;
	struct ib_cq *ibcq;

	ibcq = &hr_cq->ib_cq;
	hr_dev = to_hr_dev(ibcq->device);

	if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
	    event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
	    event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
		dev_err(&hr_dev->pdev->dev,
			"hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
			event_type, hr_cq->cqn);
		return;
	}

	if (ibcq->event_handler) {
		event.device = ibcq->device;
		event.event = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev,
			     struct hns_roce_cmd_mailbox *mailbox,
			     unsigned long cq_num)
{
	return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cq_num, 0,
				 HNS_ROCE_CMD_SW2HW_CQ, HNS_ROCE_CMD_TIME_CLASS_A);
}
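
/*
 * hns_roce_cq_alloc() - allocate the software and hardware state for a CQ.
 *
 * Looks up the MTT segment describing the CQE buffer, reserves a CQN from
 * the bitmap, pins a CQC entry in the HEM table, publishes the CQ in the
 * radix tree so the event handlers below can find it, and finally hands
 * the context to hardware with a SW2HW_CQ mailbox command. Each failure
 * path unwinds exactly the steps that preceded it.
 */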

static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
			     struct hns_roce_mtt *hr_mtt,
			     struct hns_roce_uar *hr_uar,
			     struct hns_roce_cq *hr_cq, int vector,
			     int collapsed)
{
	struct hns_roce_cmd_mailbox *mailbox = NULL;
	struct hns_roce_cq_table *cq_table = NULL;
	struct device *dev = &hr_dev->pdev->dev;
	dma_addr_t dma_handle;
	u64 *mtts = NULL;
	int ret = 0;

	cq_table = &hr_dev->cq_table;

	/* Get the physical address of the cq buf */
	mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
				   hr_mtt->first_seg, &dma_handle);
	if (!mtts) {
		dev_err(dev, "CQ alloc: failed to find CQ buf addr.\n");
		return -EINVAL;
	}

	if (vector >= hr_dev->caps.num_comp_vectors) {
		dev_err(dev, "CQ alloc: invalid vector.\n");
		return -EINVAL;
	}
	hr_cq->vector = vector;

	ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
	if (ret == -1) {
		dev_err(dev, "CQ alloc: failed to alloc CQ index.\n");
		return -ENOMEM;
	}

	/* Get CQC (CQ context) memory from the HEM (Hardware Entry Memory) table */
	ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
	if (ret) {
		dev_err(dev, "CQ alloc: failed to get context mem.\n");
		goto err_out;
	}

	/* Insert the CQ into the radix tree, keyed by CQN */
	spin_lock_irq(&cq_table->lock);
	ret = radix_tree_insert(&cq_table->tree, hr_cq->cqn, hr_cq);
	spin_unlock_irq(&cq_table->lock);
	if (ret) {
		dev_err(dev, "CQ alloc: radix_tree_insert failed.\n");
		goto err_put;
	}

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_radix;
	}

	hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle,
			      nent, vector);

	/* Send mailbox to hw */
	ret = hns_roce_sw2hw_cq(hr_dev, mailbox, hr_cq->cqn);
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	if (ret) {
		dev_err(dev, "CQ alloc: SW2HW_CQ mailbox cmd failed.\n");
		goto err_radix;
	}

	hr_cq->cons_index = 0;
	hr_cq->uar = hr_uar;
	/* cq_event() takes references on refcount; initialize it before use */
	atomic_set(&hr_cq->refcount, 1);
	init_completion(&hr_cq->free);

	return 0;

err_radix:
	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, hr_cq->cqn);
	spin_unlock_irq(&cq_table->lock);

err_put:
	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);

err_out:
	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn);
	return ret;
}
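
/*
 * hns_roce_hw2sw_cq() - reclaim CQ context from hardware.
 *
 * When @mailbox is NULL the command is issued with the op modifier set to
 * 1 (see the conditional below), so the hardware simply invalidates its
 * copy of the CQC instead of writing it back to a mailbox buffer.
 */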

static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
			     struct hns_roce_cmd_mailbox *mailbox,
			     unsigned long cq_num)
{
	return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
				 mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_CQ,
				 HNS_ROCE_CMD_TIME_CLASS_A);
}

static void hns_roce_free_cq(struct hns_roce_dev *hr_dev,
			     struct hns_roce_cq *hr_cq)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct device *dev = &hr_dev->pdev->dev;
	int ret;

	ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn);
	if (ret)
		dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
			hr_cq->cqn);

	/* Wait until any in-flight interrupt handling has completed */
	synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);

	/* Drop the initial reference and wait for cq_event() users */
	if (atomic_dec_and_test(&hr_cq->refcount))
		complete(&hr_cq->free);
	wait_for_completion(&hr_cq->free);

	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, hr_cq->cqn);
	spin_unlock_irq(&cq_table->lock);

	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn);
}
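
/*
 * hns_roce_ib_get_cq_umem() - pin a user-space CQE buffer and map it.
 *
 * Pins the user pages with ib_umem_get(), then builds an MTT for them so
 * the hardware can address the buffer. On failure, the umem and any MTT
 * state acquired so far are released.
 */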

static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
				   struct ib_ucontext *context,
				   struct hns_roce_cq_buf *buf,
				   struct ib_umem **umem, u64 buf_addr, int cqe)
{
	int ret;

	*umem = ib_umem_get(context, buf_addr, cqe * hr_dev->caps.cq_entry_sz,
			    IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
				ilog2((unsigned int)(*umem)->page_size),
				&buf->hr_mtt);
	if (ret)
		goto err_buf;

	ret = hns_roce_ib_umem_write_mtt(hr_dev, &buf->hr_mtt, *umem);
	if (ret)
		goto err_mtt;

	return 0;

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);

err_buf:
	ib_umem_release(*umem);
	return ret;
}

static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
				    struct hns_roce_cq_buf *buf, u32 nent)
{
	int ret;

	ret = hns_roce_buf_alloc(hr_dev, nent * hr_dev->caps.cq_entry_sz,
				 PAGE_SIZE * 2, &buf->hr_buf);
	if (ret)
		goto out;

	ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages,
				buf->hr_buf.page_shift, &buf->hr_mtt);
	if (ret)
		goto err_buf;

	ret = hns_roce_buf_write_mtt(hr_dev, &buf->hr_mtt, &buf->hr_buf);
	if (ret)
		goto err_mtt;

	return 0;

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);

err_buf:
	hns_roce_buf_free(hr_dev, nent * hr_dev->caps.cq_entry_sz,
			  &buf->hr_buf);
out:
	return ret;
}

static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev,
				    struct hns_roce_cq_buf *buf, int cqe)
{
	hns_roce_buf_free(hr_dev, (cqe + 1) * hr_dev->caps.cq_entry_sz,
			  &buf->hr_buf);
}
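
/*
 * hns_roce_ib_create_cq() - the create_cq verb for this driver.
 *
 * Illustrative sketch (an assumption, not code from this driver): a kernel
 * ULP would normally reach this entry point through the core verbs API,
 * roughly as follows; my_comp_handler, my_event_handler and my_context are
 * placeholder names.
 *
 *	struct ib_cq_init_attr attr = { .cqe = 256, .comp_vector = 0 };
 *	struct ib_cq *cq;
 *
 *	cq = ib_create_cq(ib_dev, my_comp_handler, my_event_handler,
 *			  my_context, &attr);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *
 * For a user-space caller, @context and @udata carry the address of the
 * CQE ring mapped in user memory, and the CQN is copied back to user
 * space on success.
 */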

struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
				    const struct ib_cq_init_attr *attr,
				    struct ib_ucontext *context,
				    struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_ib_create_cq ucmd;
	struct hns_roce_cq *hr_cq = NULL;
	struct hns_roce_uar *uar = NULL;
	int vector = attr->comp_vector;
	int cq_entries = attr->cqe;
	int ret = 0;

	if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
		dev_err(dev, "Create CQ failed. entries=%d, max=%d\n",
			cq_entries, hr_dev->caps.max_cqes);
		return ERR_PTR(-EINVAL);
	}

	hr_cq = kmalloc(sizeof(*hr_cq), GFP_KERNEL);
	if (!hr_cq)
		return ERR_PTR(-ENOMEM);

	/* In the v1 engine, the CQ depth must be at least HNS_ROCE_MIN_CQE_NUM */
	if (cq_entries < HNS_ROCE_MIN_CQE_NUM)
		cq_entries = HNS_ROCE_MIN_CQE_NUM;

	cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
	hr_cq->ib_cq.cqe = cq_entries - 1;
	mutex_init(&hr_cq->resize_mutex);
	spin_lock_init(&hr_cq->lock);
	hr_cq->hr_resize_buf = NULL;
	hr_cq->resize_umem = NULL;

	if (context) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			dev_err(dev, "Failed to copy_from_udata.\n");
			ret = -EFAULT;
			goto err_cq;
		}

		/* Get user space address, write it into mtt table */
		ret = hns_roce_ib_get_cq_umem(hr_dev, context, &hr_cq->hr_buf,
					      &hr_cq->umem, ucmd.buf_addr,
					      cq_entries);
		if (ret) {
			dev_err(dev, "Failed to get_cq_umem.\n");
			goto err_cq;
		}

		/* Get user space parameters */
		uar = &to_hr_ucontext(context)->uar;
	} else {
		/* Init mtt table and write buf address to mtt table */
		ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf,
					       cq_entries);
		if (ret) {
			dev_err(dev, "Failed to alloc_cq_buf.\n");
			goto err_cq;
		}

		uar = &hr_dev->priv_uar;
		hr_cq->cq_db_l = hr_dev->reg_base + ROCEE_DB_OTHERS_L_0_REG +
				 0x1000 * uar->index;
	}

	/* Allocate cq index, fill cq_context */
	ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt,
				uar, hr_cq, vector, 0);
	if (ret) {
		dev_err(dev, "Create CQ failed: cq_alloc failed.\n");
		goto err_mtt;
	}

	/* Set the handlers used to dispatch completion and async events */
	hr_cq->comp = hns_roce_ib_cq_comp;
	hr_cq->event = hns_roce_ib_cq_event;
	hr_cq->cq_depth = cq_entries;

	if (context) {
		if (ib_copy_to_udata(udata, &hr_cq->cqn, sizeof(u64))) {
			ret = -EFAULT;
			goto err_mtt;
		}
	}

	return &hr_cq->ib_cq;

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
	if (context)
		ib_umem_release(hr_cq->umem);
	else
		hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
					hr_cq->ib_cq.cqe);

err_cq:
	kfree(hr_cq);
	return ERR_PTR(ret);
}
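
/*
 * hns_roce_ib_destroy_cq() - the destroy_cq verb.
 *
 * Tears down in reverse order of creation: reclaim the context from
 * hardware, clean up the MTT, then release either the pinned user memory
 * or the kernel CQE buffer.
 */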

int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);

	hns_roce_free_cq(hr_dev, hr_cq);
	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);

	if (ib_cq->uobject)
		ib_umem_release(hr_cq->umem);
	else
		/* Free the kernel-allocated CQ buffer */
		hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, ib_cq->cqe);

	kfree(hr_cq);

	return 0;
}
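
/*
 * hns_roce_cq_completion() - dispatch a completion event from the EQ
 * handler to the owning CQ. The CQN reported by hardware is masked with
 * (num_cqs - 1) so that any extra bits are stripped before the radix tree
 * lookup (num_cqs is a power of two).
 */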

void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_cq *cq;

	cq = radix_tree_lookup(&hr_dev->cq_table.tree,
			       cqn & (hr_dev->caps.num_cqs - 1));
	if (!cq) {
		dev_warn(dev, "Completion event for bogus CQ 0x%08x\n", cqn);
		return;
	}

	cq->comp(cq);
}

void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_cq *cq;

	cq = radix_tree_lookup(&cq_table->tree,
			       cqn & (hr_dev->caps.num_cqs - 1));
	if (cq)
		atomic_inc(&cq->refcount);

	if (!cq) {
		dev_warn(dev, "Async event for bogus CQ 0x%08x\n", cqn);
		return;
	}

	cq->event(cq, (enum hns_roce_event)event_type);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
}
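
/*
 * hns_roce_init_cq_table() - set up the per-device CQ bookkeeping: the
 * radix tree used by the event paths above and the CQN bitmap, with the
 * first caps.reserved_cqs indices held back from allocation.
 */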

int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;

	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);

	return hns_roce_bitmap_init(&cq_table->bitmap, hr_dev->caps.num_cqs,
				    hr_dev->caps.num_cqs - 1,
				    hr_dev->caps.reserved_cqs, 0);
}

void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->cq_table.bitmap);
}