IB/core: extended command: an improved infrastructure for uverbs commands
drivers/infiniband/core/uverbs_cmd.c
1 /*
2  * Copyright (c) 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
4  * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
5  * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35
36 #include <linux/file.h>
37 #include <linux/fs.h>
38 #include <linux/slab.h>
39
40 #include <asm/uaccess.h>
41
42 #include "uverbs.h"
43
44 struct uverbs_lock_class {
45         struct lock_class_key   key;
46         char                    name[16];
47 };
48
49 static struct uverbs_lock_class pd_lock_class   = { .name = "PD-uobj" };
50 static struct uverbs_lock_class mr_lock_class   = { .name = "MR-uobj" };
51 static struct uverbs_lock_class mw_lock_class   = { .name = "MW-uobj" };
52 static struct uverbs_lock_class cq_lock_class   = { .name = "CQ-uobj" };
53 static struct uverbs_lock_class qp_lock_class   = { .name = "QP-uobj" };
54 static struct uverbs_lock_class ah_lock_class   = { .name = "AH-uobj" };
55 static struct uverbs_lock_class srq_lock_class  = { .name = "SRQ-uobj" };
56 static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
57 #ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
58 static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
59 #endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
60
61 /*
62  * The ib_uobject locking scheme is as follows:
63  *
64  * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
65  *   needs to be held during all idr operations.  When an object is
66  *   looked up, a reference must be taken on the object's kref before
67  *   dropping this lock.
68  *
69  * - Each object also has an rwsem.  This rwsem must be held for
70  *   reading while an operation that uses the object is performed.
71  *   For example, while registering an MR, the associated PD's
72  *   uobject.mutex must be held for reading.  The rwsem must be held
73  *   for writing while initializing or destroying an object.
74  *
75  * - In addition, each object has a "live" flag.  If this flag is not
76  *   set, then lookups of the object will fail even if it is found in
77  *   the idr.  This handles a reader that blocks and does not acquire
78  *   the rwsem until after the object is destroyed.  The destroy
79  *   operation will set the live flag to 0 and then drop the rwsem;
80  *   this will allow the reader to acquire the rwsem, see that the
81  *   live flag is 0, and then drop the rwsem and its reference to
82  *   the object.  The underlying storage will not be freed until the last
83  *   reference to the object is dropped.
84  */
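
/*
 * Typical read-side use of the helpers below: idr_read_uobj() looks the
 * object up, takes a kref, acquires uobj->mutex for reading and rechecks
 * the live flag; put_uobj_read() releases both.  A minimal sketch:
 *
 *	struct ib_uobject *uobj;
 *
 *	uobj = idr_read_uobj(&ib_uverbs_pd_idr, handle, context, 0);
 *	if (!uobj)
 *		return -EINVAL;
 *	... use uobj->object while holding uobj->mutex for reading ...
 *	put_uobj_read(uobj);
 */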
85
86 static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
87                       struct ib_ucontext *context, struct uverbs_lock_class *c)
88 {
89         uobj->user_handle = user_handle;
90         uobj->context     = context;
91         kref_init(&uobj->ref);
92         init_rwsem(&uobj->mutex);
93         lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
94         uobj->live        = 0;
95 }
96
97 static void release_uobj(struct kref *kref)
98 {
99         kfree(container_of(kref, struct ib_uobject, ref));
100 }
101
102 static void put_uobj(struct ib_uobject *uobj)
103 {
104         kref_put(&uobj->ref, release_uobj);
105 }
106
107 static void put_uobj_read(struct ib_uobject *uobj)
108 {
109         up_read(&uobj->mutex);
110         put_uobj(uobj);
111 }
112
113 static void put_uobj_write(struct ib_uobject *uobj)
114 {
115         up_write(&uobj->mutex);
116         put_uobj(uobj);
117 }
118
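/*
 * Allocate an id for uobj in the given idr and record it in uobj->id.
 * Returns 0 on success or the negative errno from idr_alloc().
 */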
119 static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
120 {
121         int ret;
122
123         idr_preload(GFP_KERNEL);
124         spin_lock(&ib_uverbs_idr_lock);
125
126         ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
127         if (ret >= 0)
128                 uobj->id = ret;
129
130         spin_unlock(&ib_uverbs_idr_lock);
131         idr_preload_end();
132
133         return ret < 0 ? ret : 0;
134 }
135
136 void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
137 {
138         spin_lock(&ib_uverbs_idr_lock);
139         idr_remove(idr, uobj->id);
140         spin_unlock(&ib_uverbs_idr_lock);
141 }
142
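/*
 * Find an object by id and take a reference on its kref, but only if it
 * belongs to the given ucontext; the caller pairs this with put_uobj().
 */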
143 static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
144                                          struct ib_ucontext *context)
145 {
146         struct ib_uobject *uobj;
147
148         spin_lock(&ib_uverbs_idr_lock);
149         uobj = idr_find(idr, id);
150         if (uobj) {
151                 if (uobj->context == context)
152                         kref_get(&uobj->ref);
153                 else
154                         uobj = NULL;
155         }
156         spin_unlock(&ib_uverbs_idr_lock);
157
158         return uobj;
159 }
160
161 static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
162                                         struct ib_ucontext *context, int nested)
163 {
164         struct ib_uobject *uobj;
165
166         uobj = __idr_get_uobj(idr, id, context);
167         if (!uobj)
168                 return NULL;
169
170         if (nested)
171                 down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
172         else
173                 down_read(&uobj->mutex);
174         if (!uobj->live) {
175                 put_uobj_read(uobj);
176                 return NULL;
177         }
178
179         return uobj;
180 }
181
182 static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
183                                          struct ib_ucontext *context)
184 {
185         struct ib_uobject *uobj;
186
187         uobj = __idr_get_uobj(idr, id, context);
188         if (!uobj)
189                 return NULL;
190
191         down_write(&uobj->mutex);
192         if (!uobj->live) {
193                 put_uobj_write(uobj);
194                 return NULL;
195         }
196
197         return uobj;
198 }
199
200 static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
201                           int nested)
202 {
203         struct ib_uobject *uobj;
204
205         uobj = idr_read_uobj(idr, id, context, nested);
206         return uobj ? uobj->object : NULL;
207 }
208
209 static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
210 {
211         return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
212 }
213
214 static void put_pd_read(struct ib_pd *pd)
215 {
216         put_uobj_read(pd->uobject);
217 }
218
219 static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
220 {
221         return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
222 }
223
224 static void put_cq_read(struct ib_cq *cq)
225 {
226         put_uobj_read(cq->uobject);
227 }
228
229 static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
230 {
231         return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
232 }
233
234 static void put_ah_read(struct ib_ah *ah)
235 {
236         put_uobj_read(ah->uobject);
237 }
238
239 static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
240 {
241         return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
242 }
243
244 static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
245 {
246         struct ib_uobject *uobj;
247
248         uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
249         return uobj ? uobj->object : NULL;
250 }
251
252 static void put_qp_read(struct ib_qp *qp)
253 {
254         put_uobj_read(qp->uobject);
255 }
256
257 static void put_qp_write(struct ib_qp *qp)
258 {
259         put_uobj_write(qp->uobject);
260 }
261
262 static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
263 {
264         return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
265 }
266
267 static void put_srq_read(struct ib_srq *srq)
268 {
269         put_uobj_read(srq->uobject);
270 }
271
272 static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
273                                      struct ib_uobject **uobj)
274 {
275         *uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
276         return *uobj ? (*uobj)->object : NULL;
277 }
278
279 static void put_xrcd_read(struct ib_uobject *uobj)
280 {
281         put_uobj_read(uobj);
282 }
283
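/*
 * Each command handler below follows the same convention: copy the
 * fixed-size command structure from the user buffer, wrap any trailing
 * driver-private bytes (and the space following the fixed response) in
 * an ib_udata via INIT_UDATA(), and write the response back to the
 * address given in cmd.response.  On success the handler returns in_len.
 */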
284 ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
285                               const char __user *buf,
286                               int in_len, int out_len)
287 {
288         struct ib_uverbs_get_context      cmd;
289         struct ib_uverbs_get_context_resp resp;
290         struct ib_udata                   udata;
291         struct ib_device                 *ibdev = file->device->ib_dev;
292         struct ib_ucontext               *ucontext;
293         struct file                      *filp;
294         int ret;
295
296         if (out_len < sizeof resp)
297                 return -ENOSPC;
298
299         if (copy_from_user(&cmd, buf, sizeof cmd))
300                 return -EFAULT;
301
302         mutex_lock(&file->mutex);
303
304         if (file->ucontext) {
305                 ret = -EINVAL;
306                 goto err;
307         }
308
309         INIT_UDATA(&udata, buf + sizeof cmd,
310                    (unsigned long) cmd.response + sizeof resp,
311                    in_len - sizeof cmd, out_len - sizeof resp);
312
313         ucontext = ibdev->alloc_ucontext(ibdev, &udata);
314         if (IS_ERR(ucontext)) {
315                 ret = PTR_ERR(ucontext);
316                 goto err;
317         }
318
319         ucontext->device = ibdev;
320         INIT_LIST_HEAD(&ucontext->pd_list);
321         INIT_LIST_HEAD(&ucontext->mr_list);
322         INIT_LIST_HEAD(&ucontext->mw_list);
323         INIT_LIST_HEAD(&ucontext->cq_list);
324         INIT_LIST_HEAD(&ucontext->qp_list);
325         INIT_LIST_HEAD(&ucontext->srq_list);
326         INIT_LIST_HEAD(&ucontext->ah_list);
327         INIT_LIST_HEAD(&ucontext->xrcd_list);
328         INIT_LIST_HEAD(&ucontext->rule_list);
329         ucontext->closing = 0;
330
331         resp.num_comp_vectors = file->device->num_comp_vectors;
332
333         ret = get_unused_fd_flags(O_CLOEXEC);
334         if (ret < 0)
335                 goto err_free;
336         resp.async_fd = ret;
337
338         filp = ib_uverbs_alloc_event_file(file, 1);
339         if (IS_ERR(filp)) {
340                 ret = PTR_ERR(filp);
341                 goto err_fd;
342         }
343
344         if (copy_to_user((void __user *) (unsigned long) cmd.response,
345                          &resp, sizeof resp)) {
346                 ret = -EFAULT;
347                 goto err_file;
348         }
349
350         file->async_file = filp->private_data;
351
352         INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
353                               ib_uverbs_event_handler);
354         ret = ib_register_event_handler(&file->event_handler);
355         if (ret)
356                 goto err_file;
357
358         kref_get(&file->async_file->ref);
359         kref_get(&file->ref);
360         file->ucontext = ucontext;
361
362         fd_install(resp.async_fd, filp);
363
364         mutex_unlock(&file->mutex);
365
366         return in_len;
367
368 err_file:
369         fput(filp);
370
371 err_fd:
372         put_unused_fd(resp.async_fd);
373
374 err_free:
375         ibdev->dealloc_ucontext(ucontext);
376
377 err:
378         mutex_unlock(&file->mutex);
379         return ret;
380 }
381
382 ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
383                                const char __user *buf,
384                                int in_len, int out_len)
385 {
386         struct ib_uverbs_query_device      cmd;
387         struct ib_uverbs_query_device_resp resp;
388         struct ib_device_attr              attr;
389         int                                ret;
390
391         if (out_len < sizeof resp)
392                 return -ENOSPC;
393
394         if (copy_from_user(&cmd, buf, sizeof cmd))
395                 return -EFAULT;
396
397         ret = ib_query_device(file->device->ib_dev, &attr);
398         if (ret)
399                 return ret;
400
401         memset(&resp, 0, sizeof resp);
402
403         resp.fw_ver                    = attr.fw_ver;
404         resp.node_guid                 = file->device->ib_dev->node_guid;
405         resp.sys_image_guid            = attr.sys_image_guid;
406         resp.max_mr_size               = attr.max_mr_size;
407         resp.page_size_cap             = attr.page_size_cap;
408         resp.vendor_id                 = attr.vendor_id;
409         resp.vendor_part_id            = attr.vendor_part_id;
410         resp.hw_ver                    = attr.hw_ver;
411         resp.max_qp                    = attr.max_qp;
412         resp.max_qp_wr                 = attr.max_qp_wr;
413         resp.device_cap_flags          = attr.device_cap_flags;
414         resp.max_sge                   = attr.max_sge;
415         resp.max_sge_rd                = attr.max_sge_rd;
416         resp.max_cq                    = attr.max_cq;
417         resp.max_cqe                   = attr.max_cqe;
418         resp.max_mr                    = attr.max_mr;
419         resp.max_pd                    = attr.max_pd;
420         resp.max_qp_rd_atom            = attr.max_qp_rd_atom;
421         resp.max_ee_rd_atom            = attr.max_ee_rd_atom;
422         resp.max_res_rd_atom           = attr.max_res_rd_atom;
423         resp.max_qp_init_rd_atom       = attr.max_qp_init_rd_atom;
424         resp.max_ee_init_rd_atom       = attr.max_ee_init_rd_atom;
425         resp.atomic_cap                = attr.atomic_cap;
426         resp.max_ee                    = attr.max_ee;
427         resp.max_rdd                   = attr.max_rdd;
428         resp.max_mw                    = attr.max_mw;
429         resp.max_raw_ipv6_qp           = attr.max_raw_ipv6_qp;
430         resp.max_raw_ethy_qp           = attr.max_raw_ethy_qp;
431         resp.max_mcast_grp             = attr.max_mcast_grp;
432         resp.max_mcast_qp_attach       = attr.max_mcast_qp_attach;
433         resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
434         resp.max_ah                    = attr.max_ah;
435         resp.max_fmr                   = attr.max_fmr;
436         resp.max_map_per_fmr           = attr.max_map_per_fmr;
437         resp.max_srq                   = attr.max_srq;
438         resp.max_srq_wr                = attr.max_srq_wr;
439         resp.max_srq_sge               = attr.max_srq_sge;
440         resp.max_pkeys                 = attr.max_pkeys;
441         resp.local_ca_ack_delay        = attr.local_ca_ack_delay;
442         resp.phys_port_cnt             = file->device->ib_dev->phys_port_cnt;
443
444         if (copy_to_user((void __user *) (unsigned long) cmd.response,
445                          &resp, sizeof resp))
446                 return -EFAULT;
447
448         return in_len;
449 }
450
451 ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
452                              const char __user *buf,
453                              int in_len, int out_len)
454 {
455         struct ib_uverbs_query_port      cmd;
456         struct ib_uverbs_query_port_resp resp;
457         struct ib_port_attr              attr;
458         int                              ret;
459
460         if (out_len < sizeof resp)
461                 return -ENOSPC;
462
463         if (copy_from_user(&cmd, buf, sizeof cmd))
464                 return -EFAULT;
465
466         ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
467         if (ret)
468                 return ret;
469
470         memset(&resp, 0, sizeof resp);
471
472         resp.state           = attr.state;
473         resp.max_mtu         = attr.max_mtu;
474         resp.active_mtu      = attr.active_mtu;
475         resp.gid_tbl_len     = attr.gid_tbl_len;
476         resp.port_cap_flags  = attr.port_cap_flags;
477         resp.max_msg_sz      = attr.max_msg_sz;
478         resp.bad_pkey_cntr   = attr.bad_pkey_cntr;
479         resp.qkey_viol_cntr  = attr.qkey_viol_cntr;
480         resp.pkey_tbl_len    = attr.pkey_tbl_len;
481         resp.lid             = attr.lid;
482         resp.sm_lid          = attr.sm_lid;
483         resp.lmc             = attr.lmc;
484         resp.max_vl_num      = attr.max_vl_num;
485         resp.sm_sl           = attr.sm_sl;
486         resp.subnet_timeout  = attr.subnet_timeout;
487         resp.init_type_reply = attr.init_type_reply;
488         resp.active_width    = attr.active_width;
489         resp.active_speed    = attr.active_speed;
490         resp.phys_state      = attr.phys_state;
491         resp.link_layer      = rdma_port_get_link_layer(file->device->ib_dev,
492                                                         cmd.port_num);
493
494         if (copy_to_user((void __user *) (unsigned long) cmd.response,
495                          &resp, sizeof resp))
496                 return -EFAULT;
497
498         return in_len;
499 }
500
501 ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
502                            const char __user *buf,
503                            int in_len, int out_len)
504 {
505         struct ib_uverbs_alloc_pd      cmd;
506         struct ib_uverbs_alloc_pd_resp resp;
507         struct ib_udata                udata;
508         struct ib_uobject             *uobj;
509         struct ib_pd                  *pd;
510         int                            ret;
511
512         if (out_len < sizeof resp)
513                 return -ENOSPC;
514
515         if (copy_from_user(&cmd, buf, sizeof cmd))
516                 return -EFAULT;
517
518         INIT_UDATA(&udata, buf + sizeof cmd,
519                    (unsigned long) cmd.response + sizeof resp,
520                    in_len - sizeof cmd, out_len - sizeof resp);
521
522         uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
523         if (!uobj)
524                 return -ENOMEM;
525
526         init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
527         down_write(&uobj->mutex);
528
529         pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
530                                             file->ucontext, &udata);
531         if (IS_ERR(pd)) {
532                 ret = PTR_ERR(pd);
533                 goto err;
534         }
535
536         pd->device  = file->device->ib_dev;
537         pd->uobject = uobj;
538         atomic_set(&pd->usecnt, 0);
539
540         uobj->object = pd;
541         ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
542         if (ret)
543                 goto err_idr;
544
545         memset(&resp, 0, sizeof resp);
546         resp.pd_handle = uobj->id;
547
548         if (copy_to_user((void __user *) (unsigned long) cmd.response,
549                          &resp, sizeof resp)) {
550                 ret = -EFAULT;
551                 goto err_copy;
552         }
553
554         mutex_lock(&file->mutex);
555         list_add_tail(&uobj->list, &file->ucontext->pd_list);
556         mutex_unlock(&file->mutex);
557
558         uobj->live = 1;
559
560         up_write(&uobj->mutex);
561
562         return in_len;
563
564 err_copy:
565         idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
566
567 err_idr:
568         ib_dealloc_pd(pd);
569
570 err:
571         put_uobj_write(uobj);
572         return ret;
573 }
574
575 ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
576                              const char __user *buf,
577                              int in_len, int out_len)
578 {
579         struct ib_uverbs_dealloc_pd cmd;
580         struct ib_uobject          *uobj;
581         int                         ret;
582
583         if (copy_from_user(&cmd, buf, sizeof cmd))
584                 return -EFAULT;
585
586         uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
587         if (!uobj)
588                 return -EINVAL;
589
590         ret = ib_dealloc_pd(uobj->object);
591         if (!ret)
592                 uobj->live = 0;
593
594         put_uobj_write(uobj);
595
596         if (ret)
597                 return ret;
598
599         idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
600
601         mutex_lock(&file->mutex);
602         list_del(&uobj->list);
603         mutex_unlock(&file->mutex);
604
605         put_uobj(uobj);
606
607         return in_len;
608 }
609
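/*
 * Per-device rb-tree, keyed by inode, that maps the file backing an XRC
 * domain to its ib_xrcd so that all processes opening the same file
 * share a single XRCD.  Insertion grabs an inode reference (igrab) and
 * deletion drops it (iput); the tree is protected by the device's
 * xrcd_tree_mutex.
 */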
610 struct xrcd_table_entry {
611         struct rb_node  node;
612         struct ib_xrcd *xrcd;
613         struct inode   *inode;
614 };
615
616 static int xrcd_table_insert(struct ib_uverbs_device *dev,
617                             struct inode *inode,
618                             struct ib_xrcd *xrcd)
619 {
620         struct xrcd_table_entry *entry, *scan;
621         struct rb_node **p = &dev->xrcd_tree.rb_node;
622         struct rb_node *parent = NULL;
623
624         entry = kmalloc(sizeof *entry, GFP_KERNEL);
625         if (!entry)
626                 return -ENOMEM;
627
628         entry->xrcd  = xrcd;
629         entry->inode = inode;
630
631         while (*p) {
632                 parent = *p;
633                 scan = rb_entry(parent, struct xrcd_table_entry, node);
634
635                 if (inode < scan->inode) {
636                         p = &(*p)->rb_left;
637                 } else if (inode > scan->inode) {
638                         p = &(*p)->rb_right;
639                 } else {
640                         kfree(entry);
641                         return -EEXIST;
642                 }
643         }
644
645         rb_link_node(&entry->node, parent, p);
646         rb_insert_color(&entry->node, &dev->xrcd_tree);
647         igrab(inode);
648         return 0;
649 }
650
651 static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
652                                                   struct inode *inode)
653 {
654         struct xrcd_table_entry *entry;
655         struct rb_node *p = dev->xrcd_tree.rb_node;
656
657         while (p) {
658                 entry = rb_entry(p, struct xrcd_table_entry, node);
659
660                 if (inode < entry->inode)
661                         p = p->rb_left;
662                 else if (inode > entry->inode)
663                         p = p->rb_right;
664                 else
665                         return entry;
666         }
667
668         return NULL;
669 }
670
671 static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
672 {
673         struct xrcd_table_entry *entry;
674
675         entry = xrcd_table_search(dev, inode);
676         if (!entry)
677                 return NULL;
678
679         return entry->xrcd;
680 }
681
682 static void xrcd_table_delete(struct ib_uverbs_device *dev,
683                               struct inode *inode)
684 {
685         struct xrcd_table_entry *entry;
686
687         entry = xrcd_table_search(dev, inode);
688         if (entry) {
689                 iput(inode);
690                 rb_erase(&entry->node, &dev->xrcd_tree);
691                 kfree(entry);
692         }
693 }
694
695 ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
696                             const char __user *buf, int in_len,
697                             int out_len)
698 {
699         struct ib_uverbs_open_xrcd      cmd;
700         struct ib_uverbs_open_xrcd_resp resp;
701         struct ib_udata                 udata;
702         struct ib_uxrcd_object         *obj;
703         struct ib_xrcd                 *xrcd = NULL;
704         struct fd                       f = {NULL, 0};
705         struct inode                   *inode = NULL;
706         int                             ret = 0;
707         int                             new_xrcd = 0;
708
709         if (out_len < sizeof resp)
710                 return -ENOSPC;
711
712         if (copy_from_user(&cmd, buf, sizeof cmd))
713                 return -EFAULT;
714
715         INIT_UDATA(&udata, buf + sizeof cmd,
716                    (unsigned long) cmd.response + sizeof resp,
717                    in_len - sizeof cmd, out_len - sizeof resp);
718
719         mutex_lock(&file->device->xrcd_tree_mutex);
720
721         if (cmd.fd != -1) {
722                 /* resolve the user-supplied file descriptor */
723                 f = fdget(cmd.fd);
724                 if (!f.file) {
725                         ret = -EBADF;
726                         goto err_tree_mutex_unlock;
727                 }
728
729                 inode = file_inode(f.file);
730                 xrcd = find_xrcd(file->device, inode);
731                 if (!xrcd && !(cmd.oflags & O_CREAT)) {
732                         /* no XRCD exists for this inode; O_CREAT is required */
733                         ret = -EAGAIN;
734                         goto err_tree_mutex_unlock;
735                 }
736
737                 if (xrcd && cmd.oflags & O_EXCL) {
738                         ret = -EINVAL;
739                         goto err_tree_mutex_unlock;
740                 }
741         }
742
743         obj = kmalloc(sizeof *obj, GFP_KERNEL);
744         if (!obj) {
745                 ret = -ENOMEM;
746                 goto err_tree_mutex_unlock;
747         }
748
749         init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);
750
751         down_write(&obj->uobject.mutex);
752
753         if (!xrcd) {
754                 xrcd = file->device->ib_dev->alloc_xrcd(file->device->ib_dev,
755                                                         file->ucontext, &udata);
756                 if (IS_ERR(xrcd)) {
757                         ret = PTR_ERR(xrcd);
758                         goto err;
759                 }
760
761                 xrcd->inode   = inode;
762                 xrcd->device  = file->device->ib_dev;
763                 atomic_set(&xrcd->usecnt, 0);
764                 mutex_init(&xrcd->tgt_qp_mutex);
765                 INIT_LIST_HEAD(&xrcd->tgt_qp_list);
766                 new_xrcd = 1;
767         }
768
769         atomic_set(&obj->refcnt, 0);
770         obj->uobject.object = xrcd;
771         ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
772         if (ret)
773                 goto err_idr;
774
775         memset(&resp, 0, sizeof resp);
776         resp.xrcd_handle = obj->uobject.id;
777
778         if (inode) {
779                 if (new_xrcd) {
780                         /* create new inode/xrcd table entry */
781                         ret = xrcd_table_insert(file->device, inode, xrcd);
782                         if (ret)
783                                 goto err_insert_xrcd;
784                 }
785                 atomic_inc(&xrcd->usecnt);
786         }
787
788         if (copy_to_user((void __user *) (unsigned long) cmd.response,
789                          &resp, sizeof resp)) {
790                 ret = -EFAULT;
791                 goto err_copy;
792         }
793
794         if (f.file)
795                 fdput(f);
796
797         mutex_lock(&file->mutex);
798         list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
799         mutex_unlock(&file->mutex);
800
801         obj->uobject.live = 1;
802         up_write(&obj->uobject.mutex);
803
804         mutex_unlock(&file->device->xrcd_tree_mutex);
805         return in_len;
806
807 err_copy:
808         if (inode) {
809                 if (new_xrcd)
810                         xrcd_table_delete(file->device, inode);
811                 atomic_dec(&xrcd->usecnt);
812         }
813
814 err_insert_xrcd:
815         idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
816
817 err_idr:
818         ib_dealloc_xrcd(xrcd);
819
820 err:
821         put_uobj_write(&obj->uobject);
822
823 err_tree_mutex_unlock:
824         if (f.file)
825                 fdput(f);
826
827         mutex_unlock(&file->device->xrcd_tree_mutex);
828
829         return ret;
830 }
831
832 ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
833                              const char __user *buf, int in_len,
834                              int out_len)
835 {
836         struct ib_uverbs_close_xrcd cmd;
837         struct ib_uobject           *uobj;
838         struct ib_xrcd              *xrcd = NULL;
839         struct inode                *inode = NULL;
840         struct ib_uxrcd_object      *obj;
841         int                         live;
842         int                         ret = 0;
843
844         if (copy_from_user(&cmd, buf, sizeof cmd))
845                 return -EFAULT;
846
847         mutex_lock(&file->device->xrcd_tree_mutex);
848         uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
849         if (!uobj) {
850                 ret = -EINVAL;
851                 goto out;
852         }
853
854         xrcd  = uobj->object;
855         inode = xrcd->inode;
856         obj   = container_of(uobj, struct ib_uxrcd_object, uobject);
857         if (atomic_read(&obj->refcnt)) {
858                 put_uobj_write(uobj);
859                 ret = -EBUSY;
860                 goto out;
861         }
862
863         if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
864                 ret = ib_dealloc_xrcd(uobj->object);
865                 if (!ret)
866                         uobj->live = 0;
867         }
868
869         live = uobj->live;
870         if (inode && ret)
871                 atomic_inc(&xrcd->usecnt);
872
873         put_uobj_write(uobj);
874
875         if (ret)
876                 goto out;
877
878         if (inode && !live)
879                 xrcd_table_delete(file->device, inode);
880
881         idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
882         mutex_lock(&file->mutex);
883         list_del(&uobj->list);
884         mutex_unlock(&file->mutex);
885
886         put_uobj(uobj);
887         ret = in_len;
888
889 out:
890         mutex_unlock(&file->device->xrcd_tree_mutex);
891         return ret;
892 }
893
894 void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
895                             struct ib_xrcd *xrcd)
896 {
897         struct inode *inode;
898
899         inode = xrcd->inode;
900         if (inode && !atomic_dec_and_test(&xrcd->usecnt))
901                 return;
902
903         ib_dealloc_xrcd(xrcd);
904
905         if (inode)
906                 xrcd_table_delete(dev, inode);
907 }
908
909 ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
910                          const char __user *buf, int in_len,
911                          int out_len)
912 {
913         struct ib_uverbs_reg_mr      cmd;
914         struct ib_uverbs_reg_mr_resp resp;
915         struct ib_udata              udata;
916         struct ib_uobject           *uobj;
917         struct ib_pd                *pd;
918         struct ib_mr                *mr;
919         int                          ret;
920
921         if (out_len < sizeof resp)
922                 return -ENOSPC;
923
924         if (copy_from_user(&cmd, buf, sizeof cmd))
925                 return -EFAULT;
926
927         INIT_UDATA(&udata, buf + sizeof cmd,
928                    (unsigned long) cmd.response + sizeof resp,
929                    in_len - sizeof cmd, out_len - sizeof resp);
930
931         if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
932                 return -EINVAL;
933
934         /*
935          * Local write permission is required if remote write or
936          * remote atomic permission is also requested.
937          */
938         if (cmd.access_flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
939             !(cmd.access_flags & IB_ACCESS_LOCAL_WRITE))
940                 return -EINVAL;
941
942         uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
943         if (!uobj)
944                 return -ENOMEM;
945
946         init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
947         down_write(&uobj->mutex);
948
949         pd = idr_read_pd(cmd.pd_handle, file->ucontext);
950         if (!pd) {
951                 ret = -EINVAL;
952                 goto err_free;
953         }
954
955         mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
956                                      cmd.access_flags, &udata);
957         if (IS_ERR(mr)) {
958                 ret = PTR_ERR(mr);
959                 goto err_put;
960         }
961
962         mr->device  = pd->device;
963         mr->pd      = pd;
964         mr->uobject = uobj;
965         atomic_inc(&pd->usecnt);
966         atomic_set(&mr->usecnt, 0);
967
968         uobj->object = mr;
969         ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
970         if (ret)
971                 goto err_unreg;
972
973         memset(&resp, 0, sizeof resp);
974         resp.lkey      = mr->lkey;
975         resp.rkey      = mr->rkey;
976         resp.mr_handle = uobj->id;
977
978         if (copy_to_user((void __user *) (unsigned long) cmd.response,
979                          &resp, sizeof resp)) {
980                 ret = -EFAULT;
981                 goto err_copy;
982         }
983
984         put_pd_read(pd);
985
986         mutex_lock(&file->mutex);
987         list_add_tail(&uobj->list, &file->ucontext->mr_list);
988         mutex_unlock(&file->mutex);
989
990         uobj->live = 1;
991
992         up_write(&uobj->mutex);
993
994         return in_len;
995
996 err_copy:
997         idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
998
999 err_unreg:
1000         ib_dereg_mr(mr);
1001
1002 err_put:
1003         put_pd_read(pd);
1004
1005 err_free:
1006         put_uobj_write(uobj);
1007         return ret;
1008 }
1009
1010 ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
1011                            const char __user *buf, int in_len,
1012                            int out_len)
1013 {
1014         struct ib_uverbs_dereg_mr cmd;
1015         struct ib_mr             *mr;
1016         struct ib_uobject        *uobj;
1017         int                       ret = -EINVAL;
1018
1019         if (copy_from_user(&cmd, buf, sizeof cmd))
1020                 return -EFAULT;
1021
1022         uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
1023         if (!uobj)
1024                 return -EINVAL;
1025
1026         mr = uobj->object;
1027
1028         ret = ib_dereg_mr(mr);
1029         if (!ret)
1030                 uobj->live = 0;
1031
1032         put_uobj_write(uobj);
1033
1034         if (ret)
1035                 return ret;
1036
1037         idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
1038
1039         mutex_lock(&file->mutex);
1040         list_del(&uobj->list);
1041         mutex_unlock(&file->mutex);
1042
1043         put_uobj(uobj);
1044
1045         return in_len;
1046 }
1047
1048 ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
1049                          const char __user *buf, int in_len,
1050                          int out_len)
1051 {
1052         struct ib_uverbs_alloc_mw      cmd;
1053         struct ib_uverbs_alloc_mw_resp resp;
1054         struct ib_uobject             *uobj;
1055         struct ib_pd                  *pd;
1056         struct ib_mw                  *mw;
1057         int                            ret;
1058
1059         if (out_len < sizeof(resp))
1060                 return -ENOSPC;
1061
1062         if (copy_from_user(&cmd, buf, sizeof(cmd)))
1063                 return -EFAULT;
1064
1065         uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
1066         if (!uobj)
1067                 return -ENOMEM;
1068
1069         init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
1070         down_write(&uobj->mutex);
1071
1072         pd = idr_read_pd(cmd.pd_handle, file->ucontext);
1073         if (!pd) {
1074                 ret = -EINVAL;
1075                 goto err_free;
1076         }
1077
1078         mw = pd->device->alloc_mw(pd, cmd.mw_type);
1079         if (IS_ERR(mw)) {
1080                 ret = PTR_ERR(mw);
1081                 goto err_put;
1082         }
1083
1084         mw->device  = pd->device;
1085         mw->pd      = pd;
1086         mw->uobject = uobj;
1087         atomic_inc(&pd->usecnt);
1088
1089         uobj->object = mw;
1090         ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
1091         if (ret)
1092                 goto err_unalloc;
1093
1094         memset(&resp, 0, sizeof(resp));
1095         resp.rkey      = mw->rkey;
1096         resp.mw_handle = uobj->id;
1097
1098         if (copy_to_user((void __user *)(unsigned long)cmd.response,
1099                          &resp, sizeof(resp))) {
1100                 ret = -EFAULT;
1101                 goto err_copy;
1102         }
1103
1104         put_pd_read(pd);
1105
1106         mutex_lock(&file->mutex);
1107         list_add_tail(&uobj->list, &file->ucontext->mw_list);
1108         mutex_unlock(&file->mutex);
1109
1110         uobj->live = 1;
1111
1112         up_write(&uobj->mutex);
1113
1114         return in_len;
1115
1116 err_copy:
1117         idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
1118
1119 err_unalloc:
1120         ib_dealloc_mw(mw);
1121
1122 err_put:
1123         put_pd_read(pd);
1124
1125 err_free:
1126         put_uobj_write(uobj);
1127         return ret;
1128 }
1129
1130 ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
1131                            const char __user *buf, int in_len,
1132                            int out_len)
1133 {
1134         struct ib_uverbs_dealloc_mw cmd;
1135         struct ib_mw               *mw;
1136         struct ib_uobject          *uobj;
1137         int                         ret = -EINVAL;
1138
1139         if (copy_from_user(&cmd, buf, sizeof(cmd)))
1140                 return -EFAULT;
1141
1142         uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
1143         if (!uobj)
1144                 return -EINVAL;
1145
1146         mw = uobj->object;
1147
1148         ret = ib_dealloc_mw(mw);
1149         if (!ret)
1150                 uobj->live = 0;
1151
1152         put_uobj_write(uobj);
1153
1154         if (ret)
1155                 return ret;
1156
1157         idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
1158
1159         mutex_lock(&file->mutex);
1160         list_del(&uobj->list);
1161         mutex_unlock(&file->mutex);
1162
1163         put_uobj(uobj);
1164
1165         return in_len;
1166 }
1167
1168 ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
1169                                       const char __user *buf, int in_len,
1170                                       int out_len)
1171 {
1172         struct ib_uverbs_create_comp_channel       cmd;
1173         struct ib_uverbs_create_comp_channel_resp  resp;
1174         struct file                               *filp;
1175         int ret;
1176
1177         if (out_len < sizeof resp)
1178                 return -ENOSPC;
1179
1180         if (copy_from_user(&cmd, buf, sizeof cmd))
1181                 return -EFAULT;
1182
1183         ret = get_unused_fd_flags(O_CLOEXEC);
1184         if (ret < 0)
1185                 return ret;
1186         resp.fd = ret;
1187
1188         filp = ib_uverbs_alloc_event_file(file, 0);
1189         if (IS_ERR(filp)) {
1190                 put_unused_fd(resp.fd);
1191                 return PTR_ERR(filp);
1192         }
1193
1194         if (copy_to_user((void __user *) (unsigned long) cmd.response,
1195                          &resp, sizeof resp)) {
1196                 put_unused_fd(resp.fd);
1197                 fput(filp);
1198                 return -EFAULT;
1199         }
1200
1201         fd_install(resp.fd, filp);
1202         return in_len;
1203 }
1204
1205 ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
1206                             const char __user *buf, int in_len,
1207                             int out_len)
1208 {
1209         struct ib_uverbs_create_cq      cmd;
1210         struct ib_uverbs_create_cq_resp resp;
1211         struct ib_udata                 udata;
1212         struct ib_ucq_object           *obj;
1213         struct ib_uverbs_event_file    *ev_file = NULL;
1214         struct ib_cq                   *cq;
1215         int                             ret;
1216
1217         if (out_len < sizeof resp)
1218                 return -ENOSPC;
1219
1220         if (copy_from_user(&cmd, buf, sizeof cmd))
1221                 return -EFAULT;
1222
1223         INIT_UDATA(&udata, buf + sizeof cmd,
1224                    (unsigned long) cmd.response + sizeof resp,
1225                    in_len - sizeof cmd, out_len - sizeof resp);
1226
1227         if (cmd.comp_vector >= file->device->num_comp_vectors)
1228                 return -EINVAL;
1229
1230         obj = kmalloc(sizeof *obj, GFP_KERNEL);
1231         if (!obj)
1232                 return -ENOMEM;
1233
1234         init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_class);
1235         down_write(&obj->uobject.mutex);
1236
1237         if (cmd.comp_channel >= 0) {
1238                 ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
1239                 if (!ev_file) {
1240                         ret = -EINVAL;
1241                         goto err;
1242                 }
1243         }
1244
1245         obj->uverbs_file           = file;
1246         obj->comp_events_reported  = 0;
1247         obj->async_events_reported = 0;
1248         INIT_LIST_HEAD(&obj->comp_list);
1249         INIT_LIST_HEAD(&obj->async_list);
1250
1251         cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
1252                                              cmd.comp_vector,
1253                                              file->ucontext, &udata);
1254         if (IS_ERR(cq)) {
1255                 ret = PTR_ERR(cq);
1256                 goto err_file;
1257         }
1258
1259         cq->device        = file->device->ib_dev;
1260         cq->uobject       = &obj->uobject;
1261         cq->comp_handler  = ib_uverbs_comp_handler;
1262         cq->event_handler = ib_uverbs_cq_event_handler;
1263         cq->cq_context    = ev_file;
1264         atomic_set(&cq->usecnt, 0);
1265
1266         obj->uobject.object = cq;
1267         ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
1268         if (ret)
1269                 goto err_free;
1270
1271         memset(&resp, 0, sizeof resp);
1272         resp.cq_handle = obj->uobject.id;
1273         resp.cqe       = cq->cqe;
1274
1275         if (copy_to_user((void __user *) (unsigned long) cmd.response,
1276                          &resp, sizeof resp)) {
1277                 ret = -EFAULT;
1278                 goto err_copy;
1279         }
1280
1281         mutex_lock(&file->mutex);
1282         list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
1283         mutex_unlock(&file->mutex);
1284
1285         obj->uobject.live = 1;
1286
1287         up_write(&obj->uobject.mutex);
1288
1289         return in_len;
1290
1291 err_copy:
1292         idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);
1293
1294 err_free:
1295         ib_destroy_cq(cq);
1296
1297 err_file:
1298         if (ev_file)
1299                 ib_uverbs_release_ucq(file, ev_file, obj);
1300
1301 err:
1302         put_uobj_write(&obj->uobject);
1303         return ret;
1304 }
1305
1306 ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
1307                             const char __user *buf, int in_len,
1308                             int out_len)
1309 {
1310         struct ib_uverbs_resize_cq      cmd;
1311         struct ib_uverbs_resize_cq_resp resp;
1312         struct ib_udata                 udata;
1313         struct ib_cq                    *cq;
1314         int                             ret = -EINVAL;
1315
1316         if (copy_from_user(&cmd, buf, sizeof cmd))
1317                 return -EFAULT;
1318
1319         INIT_UDATA(&udata, buf + sizeof cmd,
1320                    (unsigned long) cmd.response + sizeof resp,
1321                    in_len - sizeof cmd, out_len - sizeof resp);
1322
1323         cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
1324         if (!cq)
1325                 return -EINVAL;
1326
1327         ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
1328         if (ret)
1329                 goto out;
1330
1331         resp.cqe = cq->cqe;
1332
1333         if (copy_to_user((void __user *) (unsigned long) cmd.response,
1334                          &resp, sizeof resp.cqe))
1335                 ret = -EFAULT;
1336
1337 out:
1338         put_cq_read(cq);
1339
1340         return ret ? ret : in_len;
1341 }
1342
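/*
 * Translate a kernel ib_wc into the fixed-layout ib_uverbs_wc that
 * userspace expects and copy it to dest.
 */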
1343 static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
1344 {
1345         struct ib_uverbs_wc tmp;
1346
1347         tmp.wr_id               = wc->wr_id;
1348         tmp.status              = wc->status;
1349         tmp.opcode              = wc->opcode;
1350         tmp.vendor_err          = wc->vendor_err;
1351         tmp.byte_len            = wc->byte_len;
1352         tmp.ex.imm_data         = (__u32 __force) wc->ex.imm_data;
1353         tmp.qp_num              = wc->qp->qp_num;
1354         tmp.src_qp              = wc->src_qp;
1355         tmp.wc_flags            = wc->wc_flags;
1356         tmp.pkey_index          = wc->pkey_index;
1357         tmp.slid                = wc->slid;
1358         tmp.sl                  = wc->sl;
1359         tmp.dlid_path_bits      = wc->dlid_path_bits;
1360         tmp.port_num            = wc->port_num;
1361         tmp.reserved            = 0;
1362
1363         if (copy_to_user(dest, &tmp, sizeof tmp))
1364                 return -EFAULT;
1365
1366         return 0;
1367 }
1368
1369 ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
1370                           const char __user *buf, int in_len,
1371                           int out_len)
1372 {
1373         struct ib_uverbs_poll_cq       cmd;
1374         struct ib_uverbs_poll_cq_resp  resp;
1375         u8 __user                     *header_ptr;
1376         u8 __user                     *data_ptr;
1377         struct ib_cq                  *cq;
1378         struct ib_wc                   wc;
1379         int                            ret;
1380
1381         if (copy_from_user(&cmd, buf, sizeof cmd))
1382                 return -EFAULT;
1383
1384         cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
1385         if (!cq)
1386                 return -EINVAL;
1387
1388         /* we copy a struct ib_uverbs_poll_cq_resp to user space */
1389         header_ptr = (void __user *)(unsigned long) cmd.response;
1390         data_ptr = header_ptr + sizeof resp;
1391
1392         memset(&resp, 0, sizeof resp);
1393         while (resp.count < cmd.ne) {
1394                 ret = ib_poll_cq(cq, 1, &wc);
1395                 if (ret < 0)
1396                         goto out_put;
1397                 if (!ret)
1398                         break;
1399
1400                 ret = copy_wc_to_user(data_ptr, &wc);
1401                 if (ret)
1402                         goto out_put;
1403
1404                 data_ptr += sizeof(struct ib_uverbs_wc);
1405                 ++resp.count;
1406         }
1407
1408         if (copy_to_user(header_ptr, &resp, sizeof resp)) {
1409                 ret = -EFAULT;
1410                 goto out_put;
1411         }
1412
1413         ret = in_len;
1414
1415 out_put:
1416         put_cq_read(cq);
1417         return ret;
1418 }
1419
1420 ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
1421                                 const char __user *buf, int in_len,
1422                                 int out_len)
1423 {
1424         struct ib_uverbs_req_notify_cq cmd;
1425         struct ib_cq                  *cq;
1426
1427         if (copy_from_user(&cmd, buf, sizeof cmd))
1428                 return -EFAULT;
1429
1430         cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
1431         if (!cq)
1432                 return -EINVAL;
1433
1434         ib_req_notify_cq(cq, cmd.solicited_only ?
1435                          IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
1436
1437         put_cq_read(cq);
1438
1439         return in_len;
1440 }
1441
1442 ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
1443                              const char __user *buf, int in_len,
1444                              int out_len)
1445 {
1446         struct ib_uverbs_destroy_cq      cmd;
1447         struct ib_uverbs_destroy_cq_resp resp;
1448         struct ib_uobject               *uobj;
1449         struct ib_cq                    *cq;
1450         struct ib_ucq_object            *obj;
1451         struct ib_uverbs_event_file     *ev_file;
1452         int                              ret = -EINVAL;
1453
1454         if (copy_from_user(&cmd, buf, sizeof cmd))
1455                 return -EFAULT;
1456
1457         uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
1458         if (!uobj)
1459                 return -EINVAL;
1460         cq      = uobj->object;
1461         ev_file = cq->cq_context;
1462         obj     = container_of(cq->uobject, struct ib_ucq_object, uobject);
1463
1464         ret = ib_destroy_cq(cq);
1465         if (!ret)
1466                 uobj->live = 0;
1467
1468         put_uobj_write(uobj);
1469
1470         if (ret)
1471                 return ret;
1472
1473         idr_remove_uobj(&ib_uverbs_cq_idr, uobj);
1474
1475         mutex_lock(&file->mutex);
1476         list_del(&uobj->list);
1477         mutex_unlock(&file->mutex);
1478
1479         ib_uverbs_release_ucq(file, ev_file, obj);
1480
1481         memset(&resp, 0, sizeof resp);
1482         resp.comp_events_reported  = obj->comp_events_reported;
1483         resp.async_events_reported = obj->async_events_reported;
1484
1485         put_uobj(uobj);
1486
1487         if (copy_to_user((void __user *) (unsigned long) cmd.response,
1488                          &resp, sizeof resp))
1489                 return -EFAULT;
1490
1491         return in_len;
1492 }
1493
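/*
 * Create a QP.  IB_QPT_XRC_TGT QPs are created on the XRCD's device and
 * carry no PD, CQs or SRQ of their own.  All other types need a PD and a
 * send CQ; IB_QPT_XRC_INI additionally has its receive sizes forced to
 * zero, and the receive CQ defaults to the send CQ when no separate
 * handle is supplied.
 */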
1494 ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
1495                             const char __user *buf, int in_len,
1496                             int out_len)
1497 {
1498         struct ib_uverbs_create_qp      cmd;
1499         struct ib_uverbs_create_qp_resp resp;
1500         struct ib_udata                 udata;
1501         struct ib_uqp_object           *obj;
1502         struct ib_device               *device;
1503         struct ib_pd                   *pd = NULL;
1504         struct ib_xrcd                 *xrcd = NULL;
1505         struct ib_uobject              *uninitialized_var(xrcd_uobj);
1506         struct ib_cq                   *scq = NULL, *rcq = NULL;
1507         struct ib_srq                  *srq = NULL;
1508         struct ib_qp                   *qp;
1509         struct ib_qp_init_attr          attr;
1510         int ret;
1511
1512         if (out_len < sizeof resp)
1513                 return -ENOSPC;
1514
1515         if (copy_from_user(&cmd, buf, sizeof cmd))
1516                 return -EFAULT;
1517
1518         if (cmd.qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
1519                 return -EPERM;
1520
1521         INIT_UDATA(&udata, buf + sizeof cmd,
1522                    (unsigned long) cmd.response + sizeof resp,
1523                    in_len - sizeof cmd, out_len - sizeof resp);
1524
1525         obj = kzalloc(sizeof *obj, GFP_KERNEL);
1526         if (!obj)
1527                 return -ENOMEM;
1528
1529         init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
1530         down_write(&obj->uevent.uobject.mutex);
1531
1532         if (cmd.qp_type == IB_QPT_XRC_TGT) {
1533                 xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
1534                 if (!xrcd) {
1535                         ret = -EINVAL;
1536                         goto err_put;
1537                 }
1538                 device = xrcd->device;
1539         } else {
1540                 if (cmd.qp_type == IB_QPT_XRC_INI) {
1541                         cmd.max_recv_wr = cmd.max_recv_sge = 0;
1542                 } else {
1543                         if (cmd.is_srq) {
1544                                 srq = idr_read_srq(cmd.srq_handle, file->ucontext);
1545                                 if (!srq || srq->srq_type != IB_SRQT_BASIC) {
1546                                         ret = -EINVAL;
1547                                         goto err_put;
1548                                 }
1549                         }
1550
1551                         if (cmd.recv_cq_handle != cmd.send_cq_handle) {
1552                                 rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext, 0);
1553                                 if (!rcq) {
1554                                         ret = -EINVAL;
1555                                         goto err_put;
1556                                 }
1557                         }
1558                 }
1559
1560                 scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, !!rcq);
1561                 rcq = rcq ?: scq;
1562                 pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
1563                 if (!pd || !scq) {
1564                         ret = -EINVAL;
1565                         goto err_put;
1566                 }
1567
1568                 device = pd->device;
1569         }
1570
1571         attr.event_handler = ib_uverbs_qp_event_handler;
1572         attr.qp_context    = file;
1573         attr.send_cq       = scq;
1574         attr.recv_cq       = rcq;
1575         attr.srq           = srq;
1576         attr.xrcd          = xrcd;
1577         attr.sq_sig_type   = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
1578         attr.qp_type       = cmd.qp_type;
1579         attr.create_flags  = 0;
1580
1581         attr.cap.max_send_wr     = cmd.max_send_wr;
1582         attr.cap.max_recv_wr     = cmd.max_recv_wr;
1583         attr.cap.max_send_sge    = cmd.max_send_sge;
1584         attr.cap.max_recv_sge    = cmd.max_recv_sge;
1585         attr.cap.max_inline_data = cmd.max_inline_data;
1586
1587         obj->uevent.events_reported     = 0;
1588         INIT_LIST_HEAD(&obj->uevent.event_list);
1589         INIT_LIST_HEAD(&obj->mcast_list);
1590
1591         if (cmd.qp_type == IB_QPT_XRC_TGT)
1592                 qp = ib_create_qp(pd, &attr);
1593         else
1594                 qp = device->create_qp(pd, &attr, &udata);
1595
1596         if (IS_ERR(qp)) {
1597                 ret = PTR_ERR(qp);
1598                 goto err_put;
1599         }
1600
1601         if (cmd.qp_type != IB_QPT_XRC_TGT) {
1602                 qp->real_qp       = qp;
1603                 qp->device        = device;
1604                 qp->pd            = pd;
1605                 qp->send_cq       = attr.send_cq;
1606                 qp->recv_cq       = attr.recv_cq;
1607                 qp->srq           = attr.srq;
1608                 qp->event_handler = attr.event_handler;
1609                 qp->qp_context    = attr.qp_context;
1610                 qp->qp_type       = attr.qp_type;
1611                 atomic_set(&qp->usecnt, 0);
1612                 atomic_inc(&pd->usecnt);
1613                 atomic_inc(&attr.send_cq->usecnt);
1614                 if (attr.recv_cq)
1615                         atomic_inc(&attr.recv_cq->usecnt);
1616                 if (attr.srq)
1617                         atomic_inc(&attr.srq->usecnt);
1618         }
1619         qp->uobject = &obj->uevent.uobject;
1620
1621         obj->uevent.uobject.object = qp;
1622         ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1623         if (ret)
1624                 goto err_destroy;
1625
1626         memset(&resp, 0, sizeof resp);
1627         resp.qpn             = qp->qp_num;
1628         resp.qp_handle       = obj->uevent.uobject.id;
1629         resp.max_recv_sge    = attr.cap.max_recv_sge;
1630         resp.max_send_sge    = attr.cap.max_send_sge;
1631         resp.max_recv_wr     = attr.cap.max_recv_wr;
1632         resp.max_send_wr     = attr.cap.max_send_wr;
1633         resp.max_inline_data = attr.cap.max_inline_data;
1634
1635         if (copy_to_user((void __user *) (unsigned long) cmd.response,
1636                          &resp, sizeof resp)) {
1637                 ret = -EFAULT;
1638                 goto err_copy;
1639         }
1640
1641         if (xrcd) {
1642                 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
1643                                           uobject);
1644                 atomic_inc(&obj->uxrcd->refcnt);
1645                 put_xrcd_read(xrcd_uobj);
1646         }
1647
1648         if (pd)
1649                 put_pd_read(pd);
1650         if (scq)
1651                 put_cq_read(scq);
1652         if (rcq && rcq != scq)
1653                 put_cq_read(rcq);
1654         if (srq)
1655                 put_srq_read(srq);
1656
1657         mutex_lock(&file->mutex);
1658         list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
1659         mutex_unlock(&file->mutex);
1660
1661         obj->uevent.uobject.live = 1;
1662
1663         up_write(&obj->uevent.uobject.mutex);
1664
1665         return in_len;
1666
1667 err_copy:
1668         idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1669
1670 err_destroy:
1671         ib_destroy_qp(qp);
1672
1673 err_put:
1674         if (xrcd)
1675                 put_xrcd_read(xrcd_uobj);
1676         if (pd)
1677                 put_pd_read(pd);
1678         if (scq)
1679                 put_cq_read(scq);
1680         if (rcq && rcq != scq)
1681                 put_cq_read(rcq);
1682         if (srq)
1683                 put_srq_read(srq);
1684
1685         put_uobj_write(&obj->uevent.uobject);
1686         return ret;
1687 }
1688
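     /*
      * Open a handle to an existing shareable QP (e.g. an XRC TGT QP) that
      * lives on an XRC domain.  Note that the XRCD handle arrives in
      * cmd.pd_handle; the QP itself is found by QP number via ib_open_qp().
      */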
1689 ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
1690                           const char __user *buf, int in_len, int out_len)
1691 {
1692         struct ib_uverbs_open_qp        cmd;
1693         struct ib_uverbs_create_qp_resp resp;
1694         struct ib_udata                 udata;
1695         struct ib_uqp_object           *obj;
1696         struct ib_xrcd                 *xrcd;
1697         struct ib_uobject              *uninitialized_var(xrcd_uobj);
1698         struct ib_qp                   *qp;
1699         struct ib_qp_open_attr          attr;
1700         int ret;
1701
1702         if (out_len < sizeof resp)
1703                 return -ENOSPC;
1704
1705         if (copy_from_user(&cmd, buf, sizeof cmd))
1706                 return -EFAULT;
1707
1708         INIT_UDATA(&udata, buf + sizeof cmd,
1709                    (unsigned long) cmd.response + sizeof resp,
1710                    in_len - sizeof cmd, out_len - sizeof resp);
1711
1712         obj = kmalloc(sizeof *obj, GFP_KERNEL);
1713         if (!obj)
1714                 return -ENOMEM;
1715
1716         init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
1717         down_write(&obj->uevent.uobject.mutex);
1718
1719         xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
1720         if (!xrcd) {
1721                 ret = -EINVAL;
1722                 goto err_put;
1723         }
1724
1725         attr.event_handler = ib_uverbs_qp_event_handler;
1726         attr.qp_context    = file;
1727         attr.qp_num        = cmd.qpn;
1728         attr.qp_type       = cmd.qp_type;
1729
1730         obj->uevent.events_reported = 0;
1731         INIT_LIST_HEAD(&obj->uevent.event_list);
1732         INIT_LIST_HEAD(&obj->mcast_list);
1733
1734         qp = ib_open_qp(xrcd, &attr);
1735         if (IS_ERR(qp)) {
1736                 ret = PTR_ERR(qp);
1737                 goto err_put;
1738         }
1739
1740         qp->uobject = &obj->uevent.uobject;
1741
1742         obj->uevent.uobject.object = qp;
1743         ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1744         if (ret)
1745                 goto err_destroy;
1746
1747         memset(&resp, 0, sizeof resp);
1748         resp.qpn       = qp->qp_num;
1749         resp.qp_handle = obj->uevent.uobject.id;
1750
1751         if (copy_to_user((void __user *) (unsigned long) cmd.response,
1752                          &resp, sizeof resp)) {
1753                 ret = -EFAULT;
1754                 goto err_remove;
1755         }
1756
1757         obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
1758         atomic_inc(&obj->uxrcd->refcnt);
1759         put_xrcd_read(xrcd_uobj);
1760
1761         mutex_lock(&file->mutex);
1762         list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
1763         mutex_unlock(&file->mutex);
1764
1765         obj->uevent.uobject.live = 1;
1766
1767         up_write(&obj->uevent.uobject.mutex);
1768
1769         return in_len;
1770
1771 err_remove:
1772         idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1773
1774 err_destroy:
1775         ib_destroy_qp(qp);
1776
1777 err_put:
1778         put_xrcd_read(xrcd_uobj);
1779         put_uobj_write(&obj->uevent.uobject);
1780         return ret;
1781 }
1782
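     /*
      * Query a QP's current attributes with ib_query_qp() and marshal the
      * result, including the primary and alternate path information, back
      * to user space.
      */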
1783 ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
1784                            const char __user *buf, int in_len,
1785                            int out_len)
1786 {
1787         struct ib_uverbs_query_qp      cmd;
1788         struct ib_uverbs_query_qp_resp resp;
1789         struct ib_qp                   *qp;
1790         struct ib_qp_attr              *attr;
1791         struct ib_qp_init_attr         *init_attr;
1792         int                            ret;
1793
1794         if (copy_from_user(&cmd, buf, sizeof cmd))
1795                 return -EFAULT;
1796
1797         attr      = kmalloc(sizeof *attr, GFP_KERNEL);
1798         init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
1799         if (!attr || !init_attr) {
1800                 ret = -ENOMEM;
1801                 goto out;
1802         }
1803
1804         qp = idr_read_qp(cmd.qp_handle, file->ucontext);
1805         if (!qp) {
1806                 ret = -EINVAL;
1807                 goto out;
1808         }
1809
1810         ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);
1811
1812         put_qp_read(qp);
1813
1814         if (ret)
1815                 goto out;
1816
1817         memset(&resp, 0, sizeof resp);
1818
1819         resp.qp_state               = attr->qp_state;
1820         resp.cur_qp_state           = attr->cur_qp_state;
1821         resp.path_mtu               = attr->path_mtu;
1822         resp.path_mig_state         = attr->path_mig_state;
1823         resp.qkey                   = attr->qkey;
1824         resp.rq_psn                 = attr->rq_psn;
1825         resp.sq_psn                 = attr->sq_psn;
1826         resp.dest_qp_num            = attr->dest_qp_num;
1827         resp.qp_access_flags        = attr->qp_access_flags;
1828         resp.pkey_index             = attr->pkey_index;
1829         resp.alt_pkey_index         = attr->alt_pkey_index;
1830         resp.sq_draining            = attr->sq_draining;
1831         resp.max_rd_atomic          = attr->max_rd_atomic;
1832         resp.max_dest_rd_atomic     = attr->max_dest_rd_atomic;
1833         resp.min_rnr_timer          = attr->min_rnr_timer;
1834         resp.port_num               = attr->port_num;
1835         resp.timeout                = attr->timeout;
1836         resp.retry_cnt              = attr->retry_cnt;
1837         resp.rnr_retry              = attr->rnr_retry;
1838         resp.alt_port_num           = attr->alt_port_num;
1839         resp.alt_timeout            = attr->alt_timeout;
1840
1841         memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
1842         resp.dest.flow_label        = attr->ah_attr.grh.flow_label;
1843         resp.dest.sgid_index        = attr->ah_attr.grh.sgid_index;
1844         resp.dest.hop_limit         = attr->ah_attr.grh.hop_limit;
1845         resp.dest.traffic_class     = attr->ah_attr.grh.traffic_class;
1846         resp.dest.dlid              = attr->ah_attr.dlid;
1847         resp.dest.sl                = attr->ah_attr.sl;
1848         resp.dest.src_path_bits     = attr->ah_attr.src_path_bits;
1849         resp.dest.static_rate       = attr->ah_attr.static_rate;
1850         resp.dest.is_global         = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
1851         resp.dest.port_num          = attr->ah_attr.port_num;
1852
1853         memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
1854         resp.alt_dest.flow_label    = attr->alt_ah_attr.grh.flow_label;
1855         resp.alt_dest.sgid_index    = attr->alt_ah_attr.grh.sgid_index;
1856         resp.alt_dest.hop_limit     = attr->alt_ah_attr.grh.hop_limit;
1857         resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
1858         resp.alt_dest.dlid          = attr->alt_ah_attr.dlid;
1859         resp.alt_dest.sl            = attr->alt_ah_attr.sl;
1860         resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
1861         resp.alt_dest.static_rate   = attr->alt_ah_attr.static_rate;
1862         resp.alt_dest.is_global     = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
1863         resp.alt_dest.port_num      = attr->alt_ah_attr.port_num;
1864
1865         resp.max_send_wr            = init_attr->cap.max_send_wr;
1866         resp.max_recv_wr            = init_attr->cap.max_recv_wr;
1867         resp.max_send_sge           = init_attr->cap.max_send_sge;
1868         resp.max_recv_sge           = init_attr->cap.max_recv_sge;
1869         resp.max_inline_data        = init_attr->cap.max_inline_data;
1870         resp.sq_sig_all             = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
1871
1872         if (copy_to_user((void __user *) (unsigned long) cmd.response,
1873                          &resp, sizeof resp))
1874                 ret = -EFAULT;
1875
1876 out:
1877         kfree(attr);
1878         kfree(init_attr);
1879
1880         return ret ? ret : in_len;
1881 }
1882
1883 /*
      * Remove ignored fields set in the attribute mask: an XRC INI QP has no
      * responder side and an XRC TGT QP has no requester side, so strip the
      * attribute bits that do not apply instead of failing the modify.
      */
1884 static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
1885 {
1886         switch (qp_type) {
1887         case IB_QPT_XRC_INI:
1888                 return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
1889         case IB_QPT_XRC_TGT:
1890                 return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
1891                                 IB_QP_RNR_RETRY);
1892         default:
1893                 return mask;
1894         }
1895 }
1896
1897 ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
1898                             const char __user *buf, int in_len,
1899                             int out_len)
1900 {
1901         struct ib_uverbs_modify_qp cmd;
1902         struct ib_udata            udata;
1903         struct ib_qp              *qp;
1904         struct ib_qp_attr         *attr;
1905         int                        ret;
1906
1907         if (copy_from_user(&cmd, buf, sizeof cmd))
1908                 return -EFAULT;
1909
1910         INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
1911                    out_len);
1912
1913         attr = kmalloc(sizeof *attr, GFP_KERNEL);
1914         if (!attr)
1915                 return -ENOMEM;
1916
1917         qp = idr_read_qp(cmd.qp_handle, file->ucontext);
1918         if (!qp) {
1919                 ret = -EINVAL;
1920                 goto out;
1921         }
1922
1923         attr->qp_state            = cmd.qp_state;
1924         attr->cur_qp_state        = cmd.cur_qp_state;
1925         attr->path_mtu            = cmd.path_mtu;
1926         attr->path_mig_state      = cmd.path_mig_state;
1927         attr->qkey                = cmd.qkey;
1928         attr->rq_psn              = cmd.rq_psn;
1929         attr->sq_psn              = cmd.sq_psn;
1930         attr->dest_qp_num         = cmd.dest_qp_num;
1931         attr->qp_access_flags     = cmd.qp_access_flags;
1932         attr->pkey_index          = cmd.pkey_index;
1933         attr->alt_pkey_index      = cmd.alt_pkey_index;
1934         attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
1935         attr->max_rd_atomic       = cmd.max_rd_atomic;
1936         attr->max_dest_rd_atomic  = cmd.max_dest_rd_atomic;
1937         attr->min_rnr_timer       = cmd.min_rnr_timer;
1938         attr->port_num            = cmd.port_num;
1939         attr->timeout             = cmd.timeout;
1940         attr->retry_cnt           = cmd.retry_cnt;
1941         attr->rnr_retry           = cmd.rnr_retry;
1942         attr->alt_port_num        = cmd.alt_port_num;
1943         attr->alt_timeout         = cmd.alt_timeout;
1944
1945         memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
1946         attr->ah_attr.grh.flow_label        = cmd.dest.flow_label;
1947         attr->ah_attr.grh.sgid_index        = cmd.dest.sgid_index;
1948         attr->ah_attr.grh.hop_limit         = cmd.dest.hop_limit;
1949         attr->ah_attr.grh.traffic_class     = cmd.dest.traffic_class;
1950         attr->ah_attr.dlid                  = cmd.dest.dlid;
1951         attr->ah_attr.sl                    = cmd.dest.sl;
1952         attr->ah_attr.src_path_bits         = cmd.dest.src_path_bits;
1953         attr->ah_attr.static_rate           = cmd.dest.static_rate;
1954         attr->ah_attr.ah_flags              = cmd.dest.is_global ? IB_AH_GRH : 0;
1955         attr->ah_attr.port_num              = cmd.dest.port_num;
1956
1957         memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
1958         attr->alt_ah_attr.grh.flow_label    = cmd.alt_dest.flow_label;
1959         attr->alt_ah_attr.grh.sgid_index    = cmd.alt_dest.sgid_index;
1960         attr->alt_ah_attr.grh.hop_limit     = cmd.alt_dest.hop_limit;
1961         attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
1962         attr->alt_ah_attr.dlid              = cmd.alt_dest.dlid;
1963         attr->alt_ah_attr.sl                = cmd.alt_dest.sl;
1964         attr->alt_ah_attr.src_path_bits     = cmd.alt_dest.src_path_bits;
1965         attr->alt_ah_attr.static_rate       = cmd.alt_dest.static_rate;
1966         attr->alt_ah_attr.ah_flags          = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
1967         attr->alt_ah_attr.port_num          = cmd.alt_dest.port_num;
1968
1969         if (qp->real_qp == qp) {
1970                 ret = qp->device->modify_qp(qp, attr,
1971                         modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
1972         } else {
1973                 ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
1974         }
1975
1976         put_qp_read(qp);
1977
1978         if (ret)
1979                 goto out;
1980
1981         ret = in_len;
1982
1983 out:
1984         kfree(attr);
1985
1986         return ret;
1987 }
1988
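     /*
      * Destroy a QP.  The uobject is held for writing across ib_destroy_qp()
      * so the handle cannot be looked up concurrently, and the command is
      * refused with -EBUSY while multicast attachments remain on the QP.
      */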
1989 ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
1990                              const char __user *buf, int in_len,
1991                              int out_len)
1992 {
1993         struct ib_uverbs_destroy_qp      cmd;
1994         struct ib_uverbs_destroy_qp_resp resp;
1995         struct ib_uobject               *uobj;
1996         struct ib_qp                    *qp;
1997         struct ib_uqp_object            *obj;
1998         int                              ret = -EINVAL;
1999
2000         if (copy_from_user(&cmd, buf, sizeof cmd))
2001                 return -EFAULT;
2002
2003         memset(&resp, 0, sizeof resp);
2004
2005         uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
2006         if (!uobj)
2007                 return -EINVAL;
2008         qp  = uobj->object;
2009         obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
2010
2011         if (!list_empty(&obj->mcast_list)) {
2012                 put_uobj_write(uobj);
2013                 return -EBUSY;
2014         }
2015
2016         ret = ib_destroy_qp(qp);
2017         if (!ret)
2018                 uobj->live = 0;
2019
2020         put_uobj_write(uobj);
2021
2022         if (ret)
2023                 return ret;
2024
2025         if (obj->uxrcd)
2026                 atomic_dec(&obj->uxrcd->refcnt);
2027
2028         idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
2029
2030         mutex_lock(&file->mutex);
2031         list_del(&uobj->list);
2032         mutex_unlock(&file->mutex);
2033
2034         ib_uverbs_release_uevent(file, &obj->uevent);
2035
2036         resp.events_reported = obj->uevent.events_reported;
2037
2038         put_uobj(uobj);
2039
2040         if (copy_to_user((void __user *) (unsigned long) cmd.response,
2041                          &resp, sizeof resp))
2042                 return -EFAULT;
2043
2044         return in_len;
2045 }
2046
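     /*
      * Post send work requests: copy each user WR, rebuild it as a kernel
      * struct ib_send_wr with its SGE array allocated inline, look up UD
      * address handles where needed, and hand the chain to the driver's
      * post_send().  On failure resp.bad_wr reports the 1-based position of
      * the WR that was rejected.
      */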
2047 ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2048                             const char __user *buf, int in_len,
2049                             int out_len)
2050 {
2051         struct ib_uverbs_post_send      cmd;
2052         struct ib_uverbs_post_send_resp resp;
2053         struct ib_uverbs_send_wr       *user_wr;
2054         struct ib_send_wr              *wr = NULL, *last, *next, *bad_wr;
2055         struct ib_qp                   *qp;
2056         int                             i, sg_ind;
2057         int                             is_ud;
2058         ssize_t                         ret = -EINVAL;
2059
2060         if (copy_from_user(&cmd, buf, sizeof cmd))
2061                 return -EFAULT;
2062
2063         if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
2064             cmd.sge_count * sizeof (struct ib_uverbs_sge))
2065                 return -EINVAL;
2066
2067         if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
2068                 return -EINVAL;
2069
2070         user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
2071         if (!user_wr)
2072                 return -ENOMEM;
2073
2074         qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2075         if (!qp)
2076                 goto out;
2077
2078         is_ud = qp->qp_type == IB_QPT_UD;
2079         sg_ind = 0;
2080         last = NULL;
2081         for (i = 0; i < cmd.wr_count; ++i) {
2082                 if (copy_from_user(user_wr,
2083                                    buf + sizeof cmd + i * cmd.wqe_size,
2084                                    cmd.wqe_size)) {
2085                         ret = -EFAULT;
2086                         goto out_put;
2087                 }
2088
2089                 if (user_wr->num_sge + sg_ind > cmd.sge_count) {
2090                         ret = -EINVAL;
2091                         goto out_put;
2092                 }
2093
2094                 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
2095                                user_wr->num_sge * sizeof (struct ib_sge),
2096                                GFP_KERNEL);
2097                 if (!next) {
2098                         ret = -ENOMEM;
2099                         goto out_put;
2100                 }
2101
2102                 if (!last)
2103                         wr = next;
2104                 else
2105                         last->next = next;
2106                 last = next;
2107
2108                 next->next       = NULL;
2109                 next->wr_id      = user_wr->wr_id;
2110                 next->num_sge    = user_wr->num_sge;
2111                 next->opcode     = user_wr->opcode;
2112                 next->send_flags = user_wr->send_flags;
2113
2114                 if (is_ud) {
2115                         next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
2116                                                      file->ucontext);
2117                         if (!next->wr.ud.ah) {
2118                                 ret = -EINVAL;
2119                                 goto out_put;
2120                         }
2121                         next->wr.ud.remote_qpn  = user_wr->wr.ud.remote_qpn;
2122                         next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
2123                 } else {
2124                         switch (next->opcode) {
2125                         case IB_WR_RDMA_WRITE_WITH_IMM:
2126                                 next->ex.imm_data =
2127                                         (__be32 __force) user_wr->ex.imm_data;
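                                     /* fall through: also set the rdma fields */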
2128                         case IB_WR_RDMA_WRITE:
2129                         case IB_WR_RDMA_READ:
2130                                 next->wr.rdma.remote_addr =
2131                                         user_wr->wr.rdma.remote_addr;
2132                                 next->wr.rdma.rkey        =
2133                                         user_wr->wr.rdma.rkey;
2134                                 break;
2135                         case IB_WR_SEND_WITH_IMM:
2136                                 next->ex.imm_data =
2137                                         (__be32 __force) user_wr->ex.imm_data;
2138                                 break;
2139                         case IB_WR_SEND_WITH_INV:
2140                                 next->ex.invalidate_rkey =
2141                                         user_wr->ex.invalidate_rkey;
2142                                 break;
2143                         case IB_WR_ATOMIC_CMP_AND_SWP:
2144                         case IB_WR_ATOMIC_FETCH_AND_ADD:
2145                                 next->wr.atomic.remote_addr =
2146                                         user_wr->wr.atomic.remote_addr;
2147                                 next->wr.atomic.compare_add =
2148                                         user_wr->wr.atomic.compare_add;
2149                                 next->wr.atomic.swap = user_wr->wr.atomic.swap;
2150                                 next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
2151                                 break;
2152                         default:
2153                                 break;
2154                         }
2155                 }
2156
2157                 if (next->num_sge) {
2158                         next->sg_list = (void *) next +
2159                                 ALIGN(sizeof *next, sizeof (struct ib_sge));
2160                         if (copy_from_user(next->sg_list,
2161                                            buf + sizeof cmd +
2162                                            cmd.wr_count * cmd.wqe_size +
2163                                            sg_ind * sizeof (struct ib_sge),
2164                                            next->num_sge * sizeof (struct ib_sge))) {
2165                                 ret = -EFAULT;
2166                                 goto out_put;
2167                         }
2168                         sg_ind += next->num_sge;
2169                 } else
2170                         next->sg_list = NULL;
2171         }
2172
2173         resp.bad_wr = 0;
2174         ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
2175         if (ret)
2176                 for (next = wr; next; next = next->next) {
2177                         ++resp.bad_wr;
2178                         if (next == bad_wr)
2179                                 break;
2180                 }
2181
2182         if (copy_to_user((void __user *) (unsigned long) cmd.response,
2183                          &resp, sizeof resp))
2184                 ret = -EFAULT;
2185
2186 out_put:
2187         put_qp_read(qp);
2188
2189         while (wr) {
2190                 if (is_ud && wr->wr.ud.ah)
2191                         put_ah_read(wr->wr.ud.ah);
2192                 next = wr->next;
2193                 kfree(wr);
2194                 wr = next;
2195         }
2196
2197 out:
2198         kfree(user_wr);
2199
2200         return ret ? ret : in_len;
2201 }
2202
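     /*
      * Copy a user-space array of receive WRs into a linked list of kernel
      * struct ib_recv_wr.  Each WR is allocated together with its SGE array,
      * which is copied from the tail of the user buffer.  Returns the list
      * head or an ERR_PTR() on failure.
      */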
2203 static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
2204                                                     int in_len,
2205                                                     u32 wr_count,
2206                                                     u32 sge_count,
2207                                                     u32 wqe_size)
2208 {
2209         struct ib_uverbs_recv_wr *user_wr;
2210         struct ib_recv_wr        *wr = NULL, *last, *next;
2211         int                       sg_ind;
2212         int                       i;
2213         int                       ret;
2214
2215         if (in_len < wqe_size * wr_count +
2216             sge_count * sizeof (struct ib_uverbs_sge))
2217                 return ERR_PTR(-EINVAL);
2218
2219         if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
2220                 return ERR_PTR(-EINVAL);
2221
2222         user_wr = kmalloc(wqe_size, GFP_KERNEL);
2223         if (!user_wr)
2224                 return ERR_PTR(-ENOMEM);
2225
2226         sg_ind = 0;
2227         last = NULL;
2228         for (i = 0; i < wr_count; ++i) {
2229                 if (copy_from_user(user_wr, buf + i * wqe_size,
2230                                    wqe_size)) {
2231                         ret = -EFAULT;
2232                         goto err;
2233                 }
2234
2235                 if (user_wr->num_sge + sg_ind > sge_count) {
2236                         ret = -EINVAL;
2237                         goto err;
2238                 }
2239
2240                 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
2241                                user_wr->num_sge * sizeof (struct ib_sge),
2242                                GFP_KERNEL);
2243                 if (!next) {
2244                         ret = -ENOMEM;
2245                         goto err;
2246                 }
2247
2248                 if (!last)
2249                         wr = next;
2250                 else
2251                         last->next = next;
2252                 last = next;
2253
2254                 next->next       = NULL;
2255                 next->wr_id      = user_wr->wr_id;
2256                 next->num_sge    = user_wr->num_sge;
2257
2258                 if (next->num_sge) {
2259                         next->sg_list = (void *) next +
2260                                 ALIGN(sizeof *next, sizeof (struct ib_sge));
2261                         if (copy_from_user(next->sg_list,
2262                                            buf + wr_count * wqe_size +
2263                                            sg_ind * sizeof (struct ib_sge),
2264                                            next->num_sge * sizeof (struct ib_sge))) {
2265                                 ret = -EFAULT;
2266                                 goto err;
2267                         }
2268                         sg_ind += next->num_sge;
2269                 } else
2270                         next->sg_list = NULL;
2271         }
2272
2273         kfree(user_wr);
2274         return wr;
2275
2276 err:
2277         kfree(user_wr);
2278
2279         while (wr) {
2280                 next = wr->next;
2281                 kfree(wr);
2282                 wr = next;
2283         }
2284
2285         return ERR_PTR(ret);
2286 }
2287
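     /*
      * Post receive work requests: unmarshal the user WR chain, pass it to
      * the driver's post_recv() on the real QP, and report the position of
      * the failing WR in resp.bad_wr if the post is rejected part way.
      */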
2288 ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
2289                             const char __user *buf, int in_len,
2290                             int out_len)
2291 {
2292         struct ib_uverbs_post_recv      cmd;
2293         struct ib_uverbs_post_recv_resp resp;
2294         struct ib_recv_wr              *wr, *next, *bad_wr;
2295         struct ib_qp                   *qp;
2296         ssize_t                         ret = -EINVAL;
2297
2298         if (copy_from_user(&cmd, buf, sizeof cmd))
2299                 return -EFAULT;
2300
2301         wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2302                                        in_len - sizeof cmd, cmd.wr_count,
2303                                        cmd.sge_count, cmd.wqe_size);
2304         if (IS_ERR(wr))
2305                 return PTR_ERR(wr);
2306
2307         qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2308         if (!qp)
2309                 goto out;
2310
2311         resp.bad_wr = 0;
2312         ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);
2313
2314         put_qp_read(qp);
2315
2316         if (ret)
2317                 for (next = wr; next; next = next->next) {
2318                         ++resp.bad_wr;
2319                         if (next == bad_wr)
2320                                 break;
2321                 }
2322
2323         if (copy_to_user((void __user *) (unsigned long) cmd.response,
2324                          &resp, sizeof resp))
2325                 ret = -EFAULT;
2326
2327 out:
2328         while (wr) {
2329                 next = wr->next;
2330                 kfree(wr);
2331                 wr = next;
2332         }
2333
2334         return ret ? ret : in_len;
2335 }
2336
2337 ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
2338                                 const char __user *buf, int in_len,
2339                                 int out_len)
2340 {
2341         struct ib_uverbs_post_srq_recv      cmd;
2342         struct ib_uverbs_post_srq_recv_resp resp;
2343         struct ib_recv_wr                  *wr, *next, *bad_wr;
2344         struct ib_srq                      *srq;
2345         ssize_t                             ret = -EINVAL;
2346
2347         if (copy_from_user(&cmd, buf, sizeof cmd))
2348                 return -EFAULT;
2349
2350         wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2351                                        in_len - sizeof cmd, cmd.wr_count,
2352                                        cmd.sge_count, cmd.wqe_size);
2353         if (IS_ERR(wr))
2354                 return PTR_ERR(wr);
2355
2356         srq = idr_read_srq(cmd.srq_handle, file->ucontext);
2357         if (!srq)
2358                 goto out;
2359
2360         resp.bad_wr = 0;
2361         ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
2362
2363         put_srq_read(srq);
2364
2365         if (ret)
2366                 for (next = wr; next; next = next->next) {
2367                         ++resp.bad_wr;
2368                         if (next == bad_wr)
2369                                 break;
2370                 }
2371
2372         if (copy_to_user((void __user *) (unsigned long) cmd.response,
2373                          &resp, sizeof resp))
2374                 ret = -EFAULT;
2375
2376 out:
2377         while (wr) {
2378                 next = wr->next;
2379                 kfree(wr);
2380                 wr = next;
2381         }
2382
2383         return ret ? ret : in_len;
2384 }
2385
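     /*
      * Create an address handle on the given PD from the user-supplied
      * address attributes and return its handle to user space.
      */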
2386 ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
2387                             const char __user *buf, int in_len,
2388                             int out_len)
2389 {
2390         struct ib_uverbs_create_ah       cmd;
2391         struct ib_uverbs_create_ah_resp  resp;
2392         struct ib_uobject               *uobj;
2393         struct ib_pd                    *pd;
2394         struct ib_ah                    *ah;
2395         struct ib_ah_attr               attr;
2396         int ret;
2397
2398         if (out_len < sizeof resp)
2399                 return -ENOSPC;
2400
2401         if (copy_from_user(&cmd, buf, sizeof cmd))
2402                 return -EFAULT;
2403
2404         uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
2405         if (!uobj)
2406                 return -ENOMEM;
2407
2408         init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
2409         down_write(&uobj->mutex);
2410
2411         pd = idr_read_pd(cmd.pd_handle, file->ucontext);
2412         if (!pd) {
2413                 ret = -EINVAL;
2414                 goto err;
2415         }
2416
2417         attr.dlid              = cmd.attr.dlid;
2418         attr.sl                = cmd.attr.sl;
2419         attr.src_path_bits     = cmd.attr.src_path_bits;
2420         attr.static_rate       = cmd.attr.static_rate;
2421         attr.ah_flags          = cmd.attr.is_global ? IB_AH_GRH : 0;
2422         attr.port_num          = cmd.attr.port_num;
2423         attr.grh.flow_label    = cmd.attr.grh.flow_label;
2424         attr.grh.sgid_index    = cmd.attr.grh.sgid_index;
2425         attr.grh.hop_limit     = cmd.attr.grh.hop_limit;
2426         attr.grh.traffic_class = cmd.attr.grh.traffic_class;
2427         memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);
2428
2429         ah = ib_create_ah(pd, &attr);
2430         if (IS_ERR(ah)) {
2431                 ret = PTR_ERR(ah);
2432                 goto err_put;
2433         }
2434
2435         ah->uobject  = uobj;
2436         uobj->object = ah;
2437
2438         ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
2439         if (ret)
2440                 goto err_destroy;
2441
2442         resp.ah_handle = uobj->id;
2443
2444         if (copy_to_user((void __user *) (unsigned long) cmd.response,
2445                          &resp, sizeof resp)) {
2446                 ret = -EFAULT;
2447                 goto err_copy;
2448         }
2449
2450         put_pd_read(pd);
2451
2452         mutex_lock(&file->mutex);
2453         list_add_tail(&uobj->list, &file->ucontext->ah_list);
2454         mutex_unlock(&file->mutex);
2455
2456         uobj->live = 1;
2457
2458         up_write(&uobj->mutex);
2459
2460         return in_len;
2461
2462 err_copy:
2463         idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
2464
2465 err_destroy:
2466         ib_destroy_ah(ah);
2467
2468 err_put:
2469         put_pd_read(pd);
2470
2471 err:
2472         put_uobj_write(uobj);
2473         return ret;
2474 }
2475
2476 ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
2477                              const char __user *buf, int in_len, int out_len)
2478 {
2479         struct ib_uverbs_destroy_ah cmd;
2480         struct ib_ah               *ah;
2481         struct ib_uobject          *uobj;
2482         int                         ret;
2483
2484         if (copy_from_user(&cmd, buf, sizeof cmd))
2485                 return -EFAULT;
2486
2487         uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
2488         if (!uobj)
2489                 return -EINVAL;
2490         ah = uobj->object;
2491
2492         ret = ib_destroy_ah(ah);
2493         if (!ret)
2494                 uobj->live = 0;
2495
2496         put_uobj_write(uobj);
2497
2498         if (ret)
2499                 return ret;
2500
2501         idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
2502
2503         mutex_lock(&file->mutex);
2504         list_del(&uobj->list);
2505         mutex_unlock(&file->mutex);
2506
2507         put_uobj(uobj);
2508
2509         return in_len;
2510 }
2511
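     /*
      * Attach a QP to a multicast group.  Attachments are recorded in the
      * uobject's mcast_list so that duplicate requests are ignored and the
      * QP cannot be destroyed while attachments remain.
      */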
2512 ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
2513                                const char __user *buf, int in_len,
2514                                int out_len)
2515 {
2516         struct ib_uverbs_attach_mcast cmd;
2517         struct ib_qp                 *qp;
2518         struct ib_uqp_object         *obj;
2519         struct ib_uverbs_mcast_entry *mcast;
2520         int                           ret;
2521
2522         if (copy_from_user(&cmd, buf, sizeof cmd))
2523                 return -EFAULT;
2524
2525         qp = idr_write_qp(cmd.qp_handle, file->ucontext);
2526         if (!qp)
2527                 return -EINVAL;
2528
2529         obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
2530
2531         list_for_each_entry(mcast, &obj->mcast_list, list)
2532                 if (cmd.mlid == mcast->lid &&
2533                     !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2534                         ret = 0;
2535                         goto out_put;
2536                 }
2537
2538         mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
2539         if (!mcast) {
2540                 ret = -ENOMEM;
2541                 goto out_put;
2542         }
2543
2544         mcast->lid = cmd.mlid;
2545         memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);
2546
2547         ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
2548         if (!ret)
2549                 list_add_tail(&mcast->list, &obj->mcast_list);
2550         else
2551                 kfree(mcast);
2552
2553 out_put:
2554         put_qp_write(qp);
2555
2556         return ret ? ret : in_len;
2557 }
2558
2559 ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
2560                                const char __user *buf, int in_len,
2561                                int out_len)
2562 {
2563         struct ib_uverbs_detach_mcast cmd;
2564         struct ib_uqp_object         *obj;
2565         struct ib_qp                 *qp;
2566         struct ib_uverbs_mcast_entry *mcast;
2567         int                           ret = -EINVAL;
2568
2569         if (copy_from_user(&cmd, buf, sizeof cmd))
2570                 return -EFAULT;
2571
2572         qp = idr_write_qp(cmd.qp_handle, file->ucontext);
2573         if (!qp)
2574                 return -EINVAL;
2575
2576         ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
2577         if (ret)
2578                 goto out_put;
2579
2580         obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
2581
2582         list_for_each_entry(mcast, &obj->mcast_list, list)
2583                 if (cmd.mlid == mcast->lid &&
2584                     !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2585                         list_del(&mcast->list);
2586                         kfree(mcast);
2587                         break;
2588                 }
2589
2590 out_put:
2591         put_qp_write(qp);
2592
2593         return ret ? ret : in_len;
2594 }
2595
2596 #ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
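     /*
      * Translate one user-space flow spec into the kernel union ib_flow_spec,
      * rejecting a spec whose declared size does not match the kernel's size
      * for that spec type.
      */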
2597 static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
2598                                 union ib_flow_spec *ib_spec)
2599 {
2600         ib_spec->type = kern_spec->type;
2601
2602         switch (ib_spec->type) {
2603         case IB_FLOW_SPEC_ETH:
2604                 ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
2605                 if (ib_spec->eth.size != kern_spec->eth.size)
2606                         return -EINVAL;
2607                 memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
2608                        sizeof(struct ib_flow_eth_filter));
2609                 memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
2610                        sizeof(struct ib_flow_eth_filter));
2611                 break;
2612         case IB_FLOW_SPEC_IPV4:
2613                 ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
2614                 if (ib_spec->ipv4.size != kern_spec->ipv4.size)
2615                         return -EINVAL;
2616                 memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
2617                        sizeof(struct ib_flow_ipv4_filter));
2618                 memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
2619                        sizeof(struct ib_flow_ipv4_filter));
2620                 break;
2621         case IB_FLOW_SPEC_TCP:
2622         case IB_FLOW_SPEC_UDP:
2623                 ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
2624                 if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
2625                         return -EINVAL;
2626                 memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
2627                        sizeof(struct ib_flow_tcp_udp_filter));
2628                 memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
2629                        sizeof(struct ib_flow_tcp_udp_filter));
2630                 break;
2631         default:
2632                 return -EINVAL;
2633         }
2634         return 0;
2635 }
2636
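     /*
      * Extended command: create a flow steering rule.  The variable-length
      * array of flow specs following the fixed header is converted spec by
      * spec with kern_spec_to_ib_spec() before ib_create_flow() is called
      * on the QP.
      */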
2637 int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
2638                              struct ib_udata *ucore,
2639                              struct ib_udata *uhw)
2640 {
2641         struct ib_uverbs_create_flow      cmd;
2642         struct ib_uverbs_create_flow_resp resp;
2643         struct ib_uobject                 *uobj;
2644         struct ib_flow                    *flow_id;
2645         struct ib_uverbs_flow_attr        *kern_flow_attr;
2646         struct ib_flow_attr               *flow_attr;
2647         struct ib_qp                      *qp;
2648         int err = 0;
2649         void *kern_spec;
2650         void *ib_spec;
2651         int i;
2652
2653         if (ucore->outlen < sizeof(resp))
2654                 return -ENOSPC;
2655
2656         err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
2657         if (err)
2658                 return err;
2659
2660         ucore->inbuf += sizeof(cmd);
2661         ucore->inlen -= sizeof(cmd);
2662
2663         if (cmd.comp_mask)
2664                 return -EINVAL;
2665
2666         if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER &&
2667              !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
2668                 return -EPERM;
2669
2670         if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
2671                 return -EINVAL;
2672
2673         if (cmd.flow_attr.size > ucore->inlen ||
2674             cmd.flow_attr.size >
2675             (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
2676                 return -EINVAL;
2677
2678         if (cmd.flow_attr.num_of_specs) {
2679                 kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
2680                                          GFP_KERNEL);
2681                 if (!kern_flow_attr)
2682                         return -ENOMEM;
2683
2684                 memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
2685                 err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
2686                                          cmd.flow_attr.size);
2687                 if (err)
2688                         goto err_free_attr;
2689         } else {
2690                 kern_flow_attr = &cmd.flow_attr;
2691         }
2692
2693         uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
2694         if (!uobj) {
2695                 err = -ENOMEM;
2696                 goto err_free_attr;
2697         }
2698         init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
2699         down_write(&uobj->mutex);
2700
2701         qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2702         if (!qp) {
2703                 err = -EINVAL;
2704                 goto err_uobj;
2705         }
2706
2707         flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
2708         if (!flow_attr) {
2709                 err = -ENOMEM;
2710                 goto err_put;
2711         }
2712
2713         flow_attr->type = kern_flow_attr->type;
2714         flow_attr->priority = kern_flow_attr->priority;
2715         flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
2716         flow_attr->port = kern_flow_attr->port;
2717         flow_attr->flags = kern_flow_attr->flags;
2718         flow_attr->size = sizeof(*flow_attr);
2719
2720         kern_spec = kern_flow_attr + 1;
2721         ib_spec = flow_attr + 1;
2722         for (i = 0; i < flow_attr->num_of_specs &&
2723              cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
2724              cmd.flow_attr.size >=
2725              ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
2726                 err = kern_spec_to_ib_spec(kern_spec, ib_spec);
2727                 if (err)
2728                         goto err_free;
2729                 flow_attr->size +=
2730                         ((union ib_flow_spec *) ib_spec)->size;
2731                 cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
2732                 kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
2733                 ib_spec += ((union ib_flow_spec *) ib_spec)->size;
2734         }
2735         if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
2736                 pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
2737                         i, cmd.flow_attr.size);
                     err = -EINVAL;
2738                 goto err_free;
2739         }
2740         flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
2741         if (IS_ERR(flow_id)) {
2742                 err = PTR_ERR(flow_id);
2743                 goto err_free;
2744         }
2745         flow_id->qp = qp;
2746         flow_id->uobject = uobj;
2747         uobj->object = flow_id;
2748
2749         err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
2750         if (err)
2751                 goto destroy_flow;
2752
2753         memset(&resp, 0, sizeof(resp));
2754         resp.flow_handle = uobj->id;
2755
2756         err = ib_copy_to_udata(ucore, &resp, sizeof(resp));
2758         if (err)
2759                 goto err_copy;
2760
2761         put_qp_read(qp);
2762         mutex_lock(&file->mutex);
2763         list_add_tail(&uobj->list, &file->ucontext->rule_list);
2764         mutex_unlock(&file->mutex);
2765
2766         uobj->live = 1;
2767
2768         up_write(&uobj->mutex);
2769         kfree(flow_attr);
2770         if (cmd.flow_attr.num_of_specs)
2771                 kfree(kern_flow_attr);
2772         return 0;
2773 err_copy:
2774         idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
2775 destroy_flow:
2776         ib_destroy_flow(flow_id);
2777 err_free:
2778         kfree(flow_attr);
2779 err_put:
2780         put_qp_read(qp);
2781 err_uobj:
2782         put_uobj_write(uobj);
2783 err_free_attr:
2784         if (cmd.flow_attr.num_of_specs)
2785                 kfree(kern_flow_attr);
2786         return err;
2787 }
2788
2789 int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
2790                               struct ib_udata *ucore,
2791                               struct ib_udata *uhw)
2792 {
2793         struct ib_uverbs_destroy_flow   cmd;
2794         struct ib_flow                  *flow_id;
2795         struct ib_uobject               *uobj;
2796         int                             ret;
2797
2798         ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
2799         if (ret)
2800                 return ret;
2801
2802         uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
2803                               file->ucontext);
2804         if (!uobj)
2805                 return -EINVAL;
2806         flow_id = uobj->object;
2807
2808         ret = ib_destroy_flow(flow_id);
2809         if (!ret)
2810                 uobj->live = 0;
2811
2812         put_uobj_write(uobj);
2813
2814         idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
2815
2816         mutex_lock(&file->mutex);
2817         list_del(&uobj->list);
2818         mutex_unlock(&file->mutex);
2819
2820         put_uobj(uobj);
2821
2822         return ret;
2823 }
2824 #endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
2825
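     /*
      * Common helper for the create_srq and create_xsrq commands.  For an
      * XRC SRQ it additionally looks up the XRCD and completion queue and
      * takes a reference on the XRC domain's user object.
      */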
2826 static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
2827                                 struct ib_uverbs_create_xsrq *cmd,
2828                                 struct ib_udata *udata)
2829 {
2830         struct ib_uverbs_create_srq_resp resp;
2831         struct ib_usrq_object           *obj;
2832         struct ib_pd                    *pd;
2833         struct ib_srq                   *srq;
2834         struct ib_uobject               *uninitialized_var(xrcd_uobj);
2835         struct ib_srq_init_attr          attr;
2836         int ret;
2837
2838         obj = kmalloc(sizeof *obj, GFP_KERNEL);
2839         if (!obj)
2840                 return -ENOMEM;
2841
2842         init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
2843         down_write(&obj->uevent.uobject.mutex);
2844
2845         if (cmd->srq_type == IB_SRQT_XRC) {
2846                 attr.ext.xrc.xrcd  = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
2847                 if (!attr.ext.xrc.xrcd) {
2848                         ret = -EINVAL;
2849                         goto err;
2850                 }
2851
2852                 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
2853                 atomic_inc(&obj->uxrcd->refcnt);
2854
2855                 attr.ext.xrc.cq  = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
2856                 if (!attr.ext.xrc.cq) {
2857                         ret = -EINVAL;
2858                         goto err_put_xrcd;
2859                 }
2860         }
2861
2862         pd  = idr_read_pd(cmd->pd_handle, file->ucontext);
2863         if (!pd) {
2864                 ret = -EINVAL;
2865                 goto err_put_cq;
2866         }
2867
2868         attr.event_handler  = ib_uverbs_srq_event_handler;
2869         attr.srq_context    = file;
2870         attr.srq_type       = cmd->srq_type;
2871         attr.attr.max_wr    = cmd->max_wr;
2872         attr.attr.max_sge   = cmd->max_sge;
2873         attr.attr.srq_limit = cmd->srq_limit;
2874
2875         obj->uevent.events_reported = 0;
2876         INIT_LIST_HEAD(&obj->uevent.event_list);
2877
2878         srq = pd->device->create_srq(pd, &attr, udata);
2879         if (IS_ERR(srq)) {
2880                 ret = PTR_ERR(srq);
2881                 goto err_put;
2882         }
2883
2884         srq->device        = pd->device;
2885         srq->pd            = pd;
2886         srq->srq_type      = cmd->srq_type;
2887         srq->uobject       = &obj->uevent.uobject;
2888         srq->event_handler = attr.event_handler;
2889         srq->srq_context   = attr.srq_context;
2890
2891         if (cmd->srq_type == IB_SRQT_XRC) {
2892                 srq->ext.xrc.cq   = attr.ext.xrc.cq;
2893                 srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
2894                 atomic_inc(&attr.ext.xrc.cq->usecnt);
2895                 atomic_inc(&attr.ext.xrc.xrcd->usecnt);
2896         }
2897
2898         atomic_inc(&pd->usecnt);
2899         atomic_set(&srq->usecnt, 0);
2900
2901         obj->uevent.uobject.object = srq;
2902         ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
2903         if (ret)
2904                 goto err_destroy;
2905
2906         memset(&resp, 0, sizeof resp);
2907         resp.srq_handle = obj->uevent.uobject.id;
2908         resp.max_wr     = attr.attr.max_wr;
2909         resp.max_sge    = attr.attr.max_sge;
2910         if (cmd->srq_type == IB_SRQT_XRC)
2911                 resp.srqn = srq->ext.xrc.srq_num;
2912
2913         if (copy_to_user((void __user *) (unsigned long) cmd->response,
2914                          &resp, sizeof resp)) {
2915                 ret = -EFAULT;
2916                 goto err_copy;
2917         }
2918
2919         if (cmd->srq_type == IB_SRQT_XRC) {
2920                 put_uobj_read(xrcd_uobj);
2921                 put_cq_read(attr.ext.xrc.cq);
2922         }
2923         put_pd_read(pd);
2924
2925         mutex_lock(&file->mutex);
2926         list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
2927         mutex_unlock(&file->mutex);
2928
2929         obj->uevent.uobject.live = 1;
2930
2931         up_write(&obj->uevent.uobject.mutex);
2932
2933         return 0;
2934
2935 err_copy:
2936         idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
2937
2938 err_destroy:
2939         ib_destroy_srq(srq);
2940
2941 err_put:
2942         put_pd_read(pd);
2943
2944 err_put_cq:
2945         if (cmd->srq_type == IB_SRQT_XRC)
2946                 put_cq_read(attr.ext.xrc.cq);
2947
2948 err_put_xrcd:
2949         if (cmd->srq_type == IB_SRQT_XRC) {
2950                 atomic_dec(&obj->uxrcd->refcnt);
2951                 put_uobj_read(xrcd_uobj);
2952         }
2953
2954 err:
2955         put_uobj_write(&obj->uevent.uobject);
2956         return ret;
2957 }
2958
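     /*
      * Legacy create_srq command: repackage the request as an IB_SRQT_BASIC
      * create_xsrq and let the common helper above do the work.
      */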
2959 ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
2960                              const char __user *buf, int in_len,
2961                              int out_len)
2962 {
2963         struct ib_uverbs_create_srq      cmd;
2964         struct ib_uverbs_create_xsrq     xcmd;
2965         struct ib_uverbs_create_srq_resp resp;
2966         struct ib_udata                  udata;
2967         int ret;
2968
2969         if (out_len < sizeof resp)
2970                 return -ENOSPC;
2971
2972         if (copy_from_user(&cmd, buf, sizeof cmd))
2973                 return -EFAULT;
2974
2975         xcmd.response    = cmd.response;
2976         xcmd.user_handle = cmd.user_handle;
2977         xcmd.srq_type    = IB_SRQT_BASIC;
2978         xcmd.pd_handle   = cmd.pd_handle;
2979         xcmd.max_wr      = cmd.max_wr;
2980         xcmd.max_sge     = cmd.max_sge;
2981         xcmd.srq_limit   = cmd.srq_limit;
2982
2983         INIT_UDATA(&udata, buf + sizeof cmd,
2984                    (unsigned long) cmd.response + sizeof resp,
2985                    in_len - sizeof cmd, out_len - sizeof resp);
2986
2987         ret = __uverbs_create_xsrq(file, &xcmd, &udata);
2988         if (ret)
2989                 return ret;
2990
2991         return in_len;
2992 }
2993
2994 ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
2995                               const char __user *buf, int in_len, int out_len)
2996 {
2997         struct ib_uverbs_create_xsrq     cmd;
2998         struct ib_uverbs_create_srq_resp resp;
2999         struct ib_udata                  udata;
3000         int ret;
3001
3002         if (out_len < sizeof resp)
3003                 return -ENOSPC;
3004
3005         if (copy_from_user(&cmd, buf, sizeof cmd))
3006                 return -EFAULT;
3007
3008         INIT_UDATA(&udata, buf + sizeof cmd,
3009                    (unsigned long) cmd.response + sizeof resp,
3010                    in_len - sizeof cmd, out_len - sizeof resp);
3011
3012         ret = __uverbs_create_xsrq(file, &cmd, &udata);
3013         if (ret)
3014                 return ret;
3015
3016         return in_len;
3017 }
3018
3019 ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
3020                              const char __user *buf, int in_len,
3021                              int out_len)
3022 {
3023         struct ib_uverbs_modify_srq cmd;
3024         struct ib_udata             udata;
3025         struct ib_srq              *srq;
3026         struct ib_srq_attr          attr;
3027         int                         ret;
3028
3029         if (copy_from_user(&cmd, buf, sizeof cmd))
3030                 return -EFAULT;
3031
3032         INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
3033                    out_len);
3034
3035         srq = idr_read_srq(cmd.srq_handle, file->ucontext);
3036         if (!srq)
3037                 return -EINVAL;
3038
3039         attr.max_wr    = cmd.max_wr;
3040         attr.srq_limit = cmd.srq_limit;
3041
3042         ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);
3043
3044         put_srq_read(srq);
3045
3046         return ret ? ret : in_len;
3047 }
3048
3049 ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
3050                             const char __user *buf,
3051                             int in_len, int out_len)
3052 {
3053         struct ib_uverbs_query_srq      cmd;
3054         struct ib_uverbs_query_srq_resp resp;
3055         struct ib_srq_attr              attr;
3056         struct ib_srq                   *srq;
3057         int                             ret;
3058
3059         if (out_len < sizeof resp)
3060                 return -ENOSPC;
3061
3062         if (copy_from_user(&cmd, buf, sizeof cmd))
3063                 return -EFAULT;
3064
3065         srq = idr_read_srq(cmd.srq_handle, file->ucontext);
3066         if (!srq)
3067                 return -EINVAL;
3068
3069         ret = ib_query_srq(srq, &attr);
3070
3071         put_srq_read(srq);
3072
3073         if (ret)
3074                 return ret;
3075
3076         memset(&resp, 0, sizeof resp);
3077
3078         resp.max_wr    = attr.max_wr;
3079         resp.max_sge   = attr.max_sge;
3080         resp.srq_limit = attr.srq_limit;
3081
3082         if (copy_to_user((void __user *) (unsigned long) cmd.response,
3083                          &resp, sizeof resp))
3084                 return -EFAULT;
3085
3086         return in_len;
3087 }
3088
3089 ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
3090                               const char __user *buf, int in_len,
3091                               int out_len)
3092 {
3093         struct ib_uverbs_destroy_srq      cmd;
3094         struct ib_uverbs_destroy_srq_resp resp;
3095         struct ib_uobject                *uobj;
3096         struct ib_srq                    *srq;
3097         struct ib_uevent_object          *obj;
3098         int                               ret = -EINVAL;
3099         struct ib_usrq_object            *us;
3100         enum ib_srq_type                  srq_type;
3101
3102         if (copy_from_user(&cmd, buf, sizeof cmd))
3103                 return -EFAULT;
3104
3105         uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
3106         if (!uobj)
3107                 return -EINVAL;
3108         srq = uobj->object;
3109         obj = container_of(uobj, struct ib_uevent_object, uobject);
3110         srq_type = srq->srq_type;
3111
3112         ret = ib_destroy_srq(srq);
3113         if (!ret)
3114                 uobj->live = 0;
3115
3116         put_uobj_write(uobj);
3117
3118         if (ret)
3119                 return ret;
3120
3121         if (srq_type == IB_SRQT_XRC) {
3122                 us = container_of(obj, struct ib_usrq_object, uevent);
3123                 atomic_dec(&us->uxrcd->refcnt);
3124         }
3125
3126         idr_remove_uobj(&ib_uverbs_srq_idr, uobj);
3127
3128         mutex_lock(&file->mutex);
3129         list_del(&uobj->list);
3130         mutex_unlock(&file->mutex);
3131
3132         ib_uverbs_release_uevent(file, obj);
3133
3134         memset(&resp, 0, sizeof resp);
3135         resp.events_reported = obj->events_reported;
3136
3137         put_uobj(uobj);
3138
3139         if (copy_to_user((void __user *) (unsigned long) cmd.response,
3140                          &resp, sizeof resp))
3141                 ret = -EFAULT;
3142
3143         return ret ? ret : in_len;
3144 }