/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_user.h"
#include "hns_roce_common.h"

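/*
 * Dispatch a completion notification to the consumer's completion
 * handler registered on the ib_cq.
 */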
static void hns_roce_ib_cq_comp(struct hns_roce_cq *hr_cq)
{
        struct ib_cq *ibcq = &hr_cq->ib_cq;

        ibcq->comp_handler(ibcq, ibcq->cq_context);
}

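/*
 * Forward an asynchronous CQ error event (invalid CQ ID, access error
 * or overflow) to the consumer's event handler as IB_EVENT_CQ_ERR.
 * Any other event type is unexpected and is only logged.
 */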
static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
                                 enum hns_roce_event event_type)
{
        struct hns_roce_dev *hr_dev;
        struct ib_event event;
        struct ib_cq *ibcq;

        ibcq = &hr_cq->ib_cq;
        hr_dev = to_hr_dev(ibcq->device);

        if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
            event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
            event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
                dev_err(&hr_dev->pdev->dev,
                        "hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
                        event_type, hr_cq->cqn);
                return;
        }

        if (ibcq->event_handler) {
                event.device = ibcq->device;
                event.event = IB_EVENT_CQ_ERR;
                event.element.cq = ibcq;
                ibcq->event_handler(&event, ibcq->cq_context);
        }
}

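/*
 * Post the SW2HW_CQ mailbox command, which hands the CQ context in the
 * mailbox buffer over to hardware and enables the CQ.
 */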
static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev,
                             struct hns_roce_cmd_mailbox *mailbox,
                             unsigned long cq_num)
{
        return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cq_num, 0,
                            HNS_ROCE_CMD_SW2HW_CQ, HNS_ROCE_CMD_TIME_CLASS_A);
}

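/*
 * Allocate and activate a CQ: look up the MTT address of the CQ buffer,
 * allocate a CQN from the bitmap, reserve CQC table memory, insert the
 * CQ into the radix tree for lookup on events, and finally write the CQ
 * context to hardware via the SW2HW_CQ mailbox command.
 */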
static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
                             struct hns_roce_mtt *hr_mtt,
                             struct hns_roce_uar *hr_uar,
                             struct hns_roce_cq *hr_cq, int vector,
                             int collapsed)
{
        struct hns_roce_cmd_mailbox *mailbox = NULL;
        struct hns_roce_cq_table *cq_table = NULL;
        struct device *dev = &hr_dev->pdev->dev;
        dma_addr_t dma_handle;
        u64 *mtts = NULL;
        int ret = 0;

        cq_table = &hr_dev->cq_table;

        /* Get the physical address of the CQ buffer */
        mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
                                   hr_mtt->first_seg, &dma_handle);
        if (!mtts) {
                dev_err(dev, "CQ alloc: failed to find CQ buf addr.\n");
                return -EINVAL;
        }

        if (vector >= hr_dev->caps.num_comp_vectors) {
                dev_err(dev, "CQ alloc: invalid vector.\n");
                return -EINVAL;
        }
        hr_cq->vector = vector;

        ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
        if (ret == -1) {
                dev_err(dev, "CQ alloc: failed to alloc index.\n");
                return -ENOMEM;
        }

        /* Get CQC memory from the HEM (Hardware Entry Memory) table */
        ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
        if (ret) {
                dev_err(dev, "CQ alloc: failed to get context mem.\n");
                goto err_out;
        }

        /* Insert the CQ into the radix tree, keyed by CQN */
        spin_lock_irq(&cq_table->lock);
        ret = radix_tree_insert(&cq_table->tree, hr_cq->cqn, hr_cq);
        spin_unlock_irq(&cq_table->lock);
        if (ret) {
                dev_err(dev, "CQ alloc: failed to insert into radix tree.\n");
                goto err_put;
        }

        /* Allocate mailbox memory */
        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
        if (IS_ERR(mailbox)) {
                ret = PTR_ERR(mailbox);
                goto err_radix;
        }

        hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle,
                              nent, vector);

        /* Send mailbox to hw */
        ret = hns_roce_sw2hw_cq(hr_dev, mailbox, hr_cq->cqn);
        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
        if (ret) {
                dev_err(dev, "CQ alloc: SW2HW_CQ mailbox cmd failed.\n");
                goto err_radix;
        }

        hr_cq->cons_index = 0;
        hr_cq->uar = hr_uar;

        return 0;

err_radix:
        spin_lock_irq(&cq_table->lock);
        radix_tree_delete(&cq_table->tree, hr_cq->cqn);
        spin_unlock_irq(&cq_table->lock);

err_put:
        hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);

err_out:
        hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn);
        return ret;
}

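/*
 * Post the HW2SW_CQ mailbox command, which takes the CQ context back
 * from hardware. A NULL mailbox sets the op modifier so that hardware
 * does not return the context.
 */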
static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
                             struct hns_roce_cmd_mailbox *mailbox,
                             unsigned long cq_num)
{
        return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
                                 mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_CQ,
                                 HNS_ROCE_CMD_TIME_CLASS_A);
}

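/*
 * Tear down a CQ in reverse order of allocation: reclaim the context
 * from hardware, wait out any interrupt handler still referencing the
 * CQ, then remove it from the radix tree and release its CQC table
 * entry and CQN.
 */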
static void hns_roce_free_cq(struct hns_roce_dev *hr_dev,
                             struct hns_roce_cq *hr_cq)
{
        struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
        struct device *dev = &hr_dev->pdev->dev;
        int ret;

        ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn);
        if (ret)
                dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
                        hr_cq->cqn);

        /* Wait until any in-flight interrupt handling has completed */
        synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);

        spin_lock_irq(&cq_table->lock);
        radix_tree_delete(&cq_table->tree, hr_cq->cqn);
        spin_unlock_irq(&cq_table->lock);

        hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
        hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn);
}

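/*
 * Pin the user-space CQ buffer, then build an MTT for it and write the
 * buffer's page addresses into the MTT so hardware can address it.
 */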
static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
                                   struct ib_ucontext *context,
                                   struct hns_roce_cq_buf *buf,
                                   struct ib_umem **umem, u64 buf_addr, int cqe)
{
        int ret;

        *umem = ib_umem_get(context, buf_addr, cqe * hr_dev->caps.cq_entry_sz,
                            IB_ACCESS_LOCAL_WRITE, 1);
        if (IS_ERR(*umem))
                return PTR_ERR(*umem);

        ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
                                ilog2((unsigned int)(*umem)->page_size),
                                &buf->hr_mtt);
        if (ret)
                goto err_buf;

        ret = hns_roce_ib_umem_write_mtt(hr_dev, &buf->hr_mtt, *umem);
        if (ret)
                goto err_mtt;

        return 0;

err_mtt:
        hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);

err_buf:
        ib_umem_release(*umem);
        return ret;
}

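/*
 * Allocate a kernel-space CQ buffer and describe it to hardware by
 * initializing an MTT and writing the buffer's pages into it.
 */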
static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
                                    struct hns_roce_cq_buf *buf, u32 nent)
{
        int ret;

        ret = hns_roce_buf_alloc(hr_dev, nent * hr_dev->caps.cq_entry_sz,
                                 PAGE_SIZE * 2, &buf->hr_buf);
        if (ret)
                goto out;

        ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages,
                                buf->hr_buf.page_shift, &buf->hr_mtt);
        if (ret)
                goto err_buf;

        ret = hns_roce_buf_write_mtt(hr_dev, &buf->hr_mtt, &buf->hr_buf);
        if (ret)
                goto err_mtt;

        return 0;

err_mtt:
        hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);

err_buf:
        hns_roce_buf_free(hr_dev, nent * hr_dev->caps.cq_entry_sz,
                          &buf->hr_buf);
out:
        return ret;
}

static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev,
                                    struct hns_roce_cq_buf *buf, int cqe)
{
        hns_roce_buf_free(hr_dev, (cqe + 1) * hr_dev->caps.cq_entry_sz,
                          &buf->hr_buf);
}

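/*
 * Create a CQ for either a user-space or a kernel consumer. The CQ
 * depth is validated against the device limits and rounded up to a
 * power of two. For user CQs the buffer comes from user memory
 * described in the udata; for kernel CQs the driver allocates the
 * buffer and doorbell itself. On success the CQN is reported back to
 * user space.
 */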
struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
                                    const struct ib_cq_init_attr *attr,
                                    struct ib_ucontext *context,
                                    struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
        struct device *dev = &hr_dev->pdev->dev;
        struct hns_roce_ib_create_cq ucmd;
        struct hns_roce_cq *hr_cq = NULL;
        struct hns_roce_uar *uar = NULL;
        int vector = attr->comp_vector;
        int cq_entries = attr->cqe;
        int ret = 0;

        if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
                dev_err(dev, "Create CQ failed: entries=%d, max=%d\n",
                        cq_entries, hr_dev->caps.max_cqes);
                return ERR_PTR(-EINVAL);
        }

        hr_cq = kmalloc(sizeof(*hr_cq), GFP_KERNEL);
        if (!hr_cq)
                return ERR_PTR(-ENOMEM);

        /* The v1 engine requires a minimum CQ depth */
        if (cq_entries < HNS_ROCE_MIN_CQE_NUM)
                cq_entries = HNS_ROCE_MIN_CQE_NUM;

        cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
        hr_cq->ib_cq.cqe = cq_entries - 1;
        mutex_init(&hr_cq->resize_mutex);
        spin_lock_init(&hr_cq->lock);
        hr_cq->hr_resize_buf = NULL;
        hr_cq->resize_umem = NULL;

        if (context) {
                if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
                        dev_err(dev, "Failed to copy_from_udata.\n");
                        ret = -EFAULT;
                        goto err_cq;
                }

                /* Pin the user-space buffer and write it into the MTT */
                ret = hns_roce_ib_get_cq_umem(hr_dev, context, &hr_cq->hr_buf,
                                              &hr_cq->umem, ucmd.buf_addr,
                                              cq_entries);
                if (ret) {
                        dev_err(dev, "Failed to get_cq_umem.\n");
                        goto err_cq;
                }

                /* Get the UAR from the user context */
                uar = &to_hr_ucontext(context)->uar;
        } else {
                /* Allocate a kernel buffer and write it into the MTT */
                ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf,
                                               cq_entries);
                if (ret) {
                        dev_err(dev, "Failed to alloc_cq_buf.\n");
                        goto err_cq;
                }

                uar = &hr_dev->priv_uar;
                hr_cq->cq_db_l = hr_dev->reg_base + ROCEE_DB_OTHERS_L_0_REG +
                                 0x1000 * uar->index;
        }

        /* Allocate a CQN and fill in the CQ context */
        ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt,
                                uar, hr_cq, vector, 0);
        if (ret) {
                dev_err(dev, "Create CQ failed: cq_alloc error.\n");
                goto err_mtt;
        }

        /* Install the completion and async event handlers */
        hr_cq->comp = hns_roce_ib_cq_comp;
        hr_cq->event = hns_roce_ib_cq_event;
        hr_cq->cq_depth = cq_entries;

        if (context) {
                if (ib_copy_to_udata(udata, &hr_cq->cqn, sizeof(u64))) {
                        ret = -EFAULT;
                        goto err_mtt;
                }
        }

        return &hr_cq->ib_cq;

err_mtt:
        hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
        if (context)
                ib_umem_release(hr_cq->umem);
        else
                hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
                                        hr_cq->ib_cq.cqe);

err_cq:
        kfree(hr_cq);
        return ERR_PTR(ret);
}

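/*
 * Destroy a CQ: release its hardware resources and MTT, then free the
 * pinned user memory or the kernel buffer, depending on who owns it.
 */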
int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
        struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);

        hns_roce_free_cq(hr_dev, hr_cq);
        hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);

        if (ib_cq->uobject)
                ib_umem_release(hr_cq->umem);
        else
                /* Free the kernel-allocated CQ buffer */
                hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, ib_cq->cqe);

        kfree(hr_cq);

        return 0;
}

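/*
 * Called from the EQ interrupt path on a completion event: look up the
 * CQ by CQN and invoke its completion callback.
 */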
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
{
        struct device *dev = &hr_dev->pdev->dev;
        struct hns_roce_cq *cq;

        cq = radix_tree_lookup(&hr_dev->cq_table.tree,
                               cqn & (hr_dev->caps.num_cqs - 1));
        if (!cq) {
                dev_warn(dev, "Completion event for bogus CQ 0x%08x\n", cqn);
                return;
        }

        cq->comp(cq);
}

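/*
 * Called from the EQ interrupt path on an asynchronous event: look up
 * the CQ by CQN and invoke its event callback, holding a reference so
 * the CQ cannot be freed while the callback runs.
 */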
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
        struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
        struct device *dev = &hr_dev->pdev->dev;
        struct hns_roce_cq *cq;

        cq = radix_tree_lookup(&cq_table->tree,
                               cqn & (hr_dev->caps.num_cqs - 1));
        if (!cq) {
                dev_warn(dev, "Async event for bogus CQ %08x\n", cqn);
                return;
        }

        atomic_inc(&cq->refcount);

        cq->event(cq, (enum hns_roce_event)event_type);

        if (atomic_dec_and_test(&cq->refcount))
                complete(&cq->free);
}

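/*
 * Initialize the CQ table at probe time: the CQN radix tree, its lock,
 * and the CQN allocation bitmap (with the reserved CQs excluded).
 */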
int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;

        spin_lock_init(&cq_table->lock);
        INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);

        return hns_roce_bitmap_init(&cq_table->bitmap, hr_dev->caps.num_cqs,
                                    hr_dev->caps.num_cqs - 1,
                                    hr_dev->caps.reserved_cqs, 0);
}

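/* Release the CQN bitmap when the device is torn down */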
void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
{
        hns_roce_bitmap_cleanup(&hr_dev->cq_table.bitmap);
}