IB/core: Add support for extended query device caps
drivers/infiniband/core/uverbs_cmd.c
1 /*
2  * Copyright (c) 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
4  * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
5  * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35
36 #include <linux/file.h>
37 #include <linux/fs.h>
38 #include <linux/slab.h>
39
40 #include <asm/uaccess.h>
41
42 #include "uverbs.h"
43 #include "core_priv.h"
44
45 struct uverbs_lock_class {
46         struct lock_class_key   key;
47         char                    name[16];
48 };
49
50 static struct uverbs_lock_class pd_lock_class   = { .name = "PD-uobj" };
51 static struct uverbs_lock_class mr_lock_class   = { .name = "MR-uobj" };
52 static struct uverbs_lock_class mw_lock_class   = { .name = "MW-uobj" };
53 static struct uverbs_lock_class cq_lock_class   = { .name = "CQ-uobj" };
54 static struct uverbs_lock_class qp_lock_class   = { .name = "QP-uobj" };
55 static struct uverbs_lock_class ah_lock_class   = { .name = "AH-uobj" };
56 static struct uverbs_lock_class srq_lock_class  = { .name = "SRQ-uobj" };
57 static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
58 static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
59
60 /*
61  * The ib_uobject locking scheme is as follows:
62  *
63  * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
64  *   needs to be held during all idr operations.  When an object is
65  *   looked up, a reference must be taken on the object's kref before
66  *   dropping this lock.
67  *
68  * - Each object also has an rwsem.  This rwsem must be held for
69  *   reading while an operation that uses the object is performed.
70  *   For example, while registering an MR, the associated PD's
71  *   uobject.mutex must be held for reading.  The rwsem must be held
72  *   for writing while initializing or destroying an object.
73  *
74  * - In addition, each object has a "live" flag.  If this flag is not
75  *   set, then lookups of the object will fail even if it is found in
76  *   the idr.  This handles a reader that blocks and does not acquire
77  *   the rwsem until after the object is destroyed.  The destroy
78  *   operation will set the live flag to 0 and then drop the rwsem;
79  *   this will allow the reader to acquire the rwsem, see that the
80  *   live flag is 0, and then drop the rwsem and its reference to
81  *   object.  The underlying storage will not be freed until the last
82  *   reference to the object is dropped.
83  */
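/*
 * For example, a command handler that operates on a PD uses the helpers
 * defined below (see idr_read_pd(), put_pd_read(), and their use in
 * ib_uverbs_reg_mr()) roughly as follows, holding the PD's rwsem for
 * reading across the driver call:
 *
 *	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
 *	if (!pd)
 *		return -EINVAL;
 *	... call into the driver while pd->uobject->mutex is held for reading ...
 *	put_pd_read(pd);
 */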
84
85 static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
86                       struct ib_ucontext *context, struct uverbs_lock_class *c)
87 {
88         uobj->user_handle = user_handle;
89         uobj->context     = context;
90         kref_init(&uobj->ref);
91         init_rwsem(&uobj->mutex);
92         lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
93         uobj->live        = 0;
94 }
95
96 static void release_uobj(struct kref *kref)
97 {
98         kfree(container_of(kref, struct ib_uobject, ref));
99 }
100
101 static void put_uobj(struct ib_uobject *uobj)
102 {
103         kref_put(&uobj->ref, release_uobj);
104 }
105
106 static void put_uobj_read(struct ib_uobject *uobj)
107 {
108         up_read(&uobj->mutex);
109         put_uobj(uobj);
110 }
111
112 static void put_uobj_write(struct ib_uobject *uobj)
113 {
114         up_write(&uobj->mutex);
115         put_uobj(uobj);
116 }
117
118 static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
119 {
120         int ret;
121
122         idr_preload(GFP_KERNEL);
123         spin_lock(&ib_uverbs_idr_lock);
124
125         ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
126         if (ret >= 0)
127                 uobj->id = ret;
128
129         spin_unlock(&ib_uverbs_idr_lock);
130         idr_preload_end();
131
132         return ret < 0 ? ret : 0;
133 }
134
135 void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
136 {
137         spin_lock(&ib_uverbs_idr_lock);
138         idr_remove(idr, uobj->id);
139         spin_unlock(&ib_uverbs_idr_lock);
140 }
141
142 static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
143                                          struct ib_ucontext *context)
144 {
145         struct ib_uobject *uobj;
146
147         spin_lock(&ib_uverbs_idr_lock);
148         uobj = idr_find(idr, id);
149         if (uobj) {
150                 if (uobj->context == context)
151                         kref_get(&uobj->ref);
152                 else
153                         uobj = NULL;
154         }
155         spin_unlock(&ib_uverbs_idr_lock);
156
157         return uobj;
158 }
159
160 static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
161                                         struct ib_ucontext *context, int nested)
162 {
163         struct ib_uobject *uobj;
164
165         uobj = __idr_get_uobj(idr, id, context);
166         if (!uobj)
167                 return NULL;
168
169         if (nested)
170                 down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
171         else
172                 down_read(&uobj->mutex);
173         if (!uobj->live) {
174                 put_uobj_read(uobj);
175                 return NULL;
176         }
177
178         return uobj;
179 }
180
181 static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
182                                          struct ib_ucontext *context)
183 {
184         struct ib_uobject *uobj;
185
186         uobj = __idr_get_uobj(idr, id, context);
187         if (!uobj)
188                 return NULL;
189
190         down_write(&uobj->mutex);
191         if (!uobj->live) {
192                 put_uobj_write(uobj);
193                 return NULL;
194         }
195
196         return uobj;
197 }
198
199 static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
200                           int nested)
201 {
202         struct ib_uobject *uobj;
203
204         uobj = idr_read_uobj(idr, id, context, nested);
205         return uobj ? uobj->object : NULL;
206 }
207
208 static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
209 {
210         return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
211 }
212
213 static void put_pd_read(struct ib_pd *pd)
214 {
215         put_uobj_read(pd->uobject);
216 }
217
218 static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
219 {
220         return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
221 }
222
223 static void put_cq_read(struct ib_cq *cq)
224 {
225         put_uobj_read(cq->uobject);
226 }
227
228 static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
229 {
230         return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
231 }
232
233 static void put_ah_read(struct ib_ah *ah)
234 {
235         put_uobj_read(ah->uobject);
236 }
237
238 static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
239 {
240         return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
241 }
242
243 static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
244 {
245         struct ib_uobject *uobj;
246
247         uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
248         return uobj ? uobj->object : NULL;
249 }
250
251 static void put_qp_read(struct ib_qp *qp)
252 {
253         put_uobj_read(qp->uobject);
254 }
255
256 static void put_qp_write(struct ib_qp *qp)
257 {
258         put_uobj_write(qp->uobject);
259 }
260
261 static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
262 {
263         return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
264 }
265
266 static void put_srq_read(struct ib_srq *srq)
267 {
268         put_uobj_read(srq->uobject);
269 }
270
271 static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
272                                      struct ib_uobject **uobj)
273 {
274         *uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
275         return *uobj ? (*uobj)->object : NULL;
276 }
277
278 static void put_xrcd_read(struct ib_uobject *uobj)
279 {
280         put_uobj_read(uobj);
281 }
282
283 ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
284                               const char __user *buf,
285                               int in_len, int out_len)
286 {
287         struct ib_uverbs_get_context      cmd;
288         struct ib_uverbs_get_context_resp resp;
289         struct ib_udata                   udata;
290         struct ib_device                 *ibdev = file->device->ib_dev;
291         struct ib_ucontext               *ucontext;
292         struct file                      *filp;
293         int ret;
294
295         if (out_len < sizeof resp)
296                 return -ENOSPC;
297
298         if (copy_from_user(&cmd, buf, sizeof cmd))
299                 return -EFAULT;
300
301         mutex_lock(&file->mutex);
302
303         if (file->ucontext) {
304                 ret = -EINVAL;
305                 goto err;
306         }
307
308         INIT_UDATA(&udata, buf + sizeof cmd,
309                    (unsigned long) cmd.response + sizeof resp,
310                    in_len - sizeof cmd, out_len - sizeof resp);
311
312         ucontext = ibdev->alloc_ucontext(ibdev, &udata);
313         if (IS_ERR(ucontext)) {
314                 ret = PTR_ERR(ucontext);
315                 goto err;
316         }
317
318         ucontext->device = ibdev;
319         INIT_LIST_HEAD(&ucontext->pd_list);
320         INIT_LIST_HEAD(&ucontext->mr_list);
321         INIT_LIST_HEAD(&ucontext->mw_list);
322         INIT_LIST_HEAD(&ucontext->cq_list);
323         INIT_LIST_HEAD(&ucontext->qp_list);
324         INIT_LIST_HEAD(&ucontext->srq_list);
325         INIT_LIST_HEAD(&ucontext->ah_list);
326         INIT_LIST_HEAD(&ucontext->xrcd_list);
327         INIT_LIST_HEAD(&ucontext->rule_list);
328         ucontext->closing = 0;
329
330         resp.num_comp_vectors = file->device->num_comp_vectors;
331
332         ret = get_unused_fd_flags(O_CLOEXEC);
333         if (ret < 0)
334                 goto err_free;
335         resp.async_fd = ret;
336
337         filp = ib_uverbs_alloc_event_file(file, 1);
338         if (IS_ERR(filp)) {
339                 ret = PTR_ERR(filp);
340                 goto err_fd;
341         }
342
343         if (copy_to_user((void __user *) (unsigned long) cmd.response,
344                          &resp, sizeof resp)) {
345                 ret = -EFAULT;
346                 goto err_file;
347         }
348
349         file->async_file = filp->private_data;
350
351         INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
352                               ib_uverbs_event_handler);
353         ret = ib_register_event_handler(&file->event_handler);
354         if (ret)
355                 goto err_file;
356
357         kref_get(&file->async_file->ref);
358         kref_get(&file->ref);
359         file->ucontext = ucontext;
360
361         fd_install(resp.async_fd, filp);
362
363         mutex_unlock(&file->mutex);
364
365         return in_len;
366
367 err_file:
368         fput(filp);
369
370 err_fd:
371         put_unused_fd(resp.async_fd);
372
373 err_free:
374         ibdev->dealloc_ucontext(ucontext);
375
376 err:
377         mutex_unlock(&file->mutex);
378         return ret;
379 }
380
381 static void copy_query_dev_fields(struct ib_uverbs_file *file,
382                                   struct ib_uverbs_query_device_resp *resp,
383                                   struct ib_device_attr *attr)
384 {
385         resp->fw_ver            = attr->fw_ver;
386         resp->node_guid         = file->device->ib_dev->node_guid;
387         resp->sys_image_guid    = attr->sys_image_guid;
388         resp->max_mr_size       = attr->max_mr_size;
389         resp->page_size_cap     = attr->page_size_cap;
390         resp->vendor_id         = attr->vendor_id;
391         resp->vendor_part_id    = attr->vendor_part_id;
392         resp->hw_ver            = attr->hw_ver;
393         resp->max_qp            = attr->max_qp;
394         resp->max_qp_wr         = attr->max_qp_wr;
395         resp->device_cap_flags  = attr->device_cap_flags;
396         resp->max_sge           = attr->max_sge;
397         resp->max_sge_rd        = attr->max_sge_rd;
398         resp->max_cq            = attr->max_cq;
399         resp->max_cqe           = attr->max_cqe;
400         resp->max_mr            = attr->max_mr;
401         resp->max_pd            = attr->max_pd;
402         resp->max_qp_rd_atom    = attr->max_qp_rd_atom;
403         resp->max_ee_rd_atom    = attr->max_ee_rd_atom;
404         resp->max_res_rd_atom   = attr->max_res_rd_atom;
405         resp->max_qp_init_rd_atom       = attr->max_qp_init_rd_atom;
406         resp->max_ee_init_rd_atom       = attr->max_ee_init_rd_atom;
407         resp->atomic_cap                = attr->atomic_cap;
408         resp->max_ee                    = attr->max_ee;
409         resp->max_rdd                   = attr->max_rdd;
410         resp->max_mw                    = attr->max_mw;
411         resp->max_raw_ipv6_qp           = attr->max_raw_ipv6_qp;
412         resp->max_raw_ethy_qp           = attr->max_raw_ethy_qp;
413         resp->max_mcast_grp             = attr->max_mcast_grp;
414         resp->max_mcast_qp_attach       = attr->max_mcast_qp_attach;
415         resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
416         resp->max_ah                    = attr->max_ah;
417         resp->max_fmr                   = attr->max_fmr;
418         resp->max_map_per_fmr           = attr->max_map_per_fmr;
419         resp->max_srq                   = attr->max_srq;
420         resp->max_srq_wr                = attr->max_srq_wr;
421         resp->max_srq_sge               = attr->max_srq_sge;
422         resp->max_pkeys                 = attr->max_pkeys;
423         resp->local_ca_ack_delay        = attr->local_ca_ack_delay;
424         resp->phys_port_cnt             = file->device->ib_dev->phys_port_cnt;
425 }
426
427 ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
428                                const char __user *buf,
429                                int in_len, int out_len)
430 {
431         struct ib_uverbs_query_device      cmd;
432         struct ib_uverbs_query_device_resp resp;
433         struct ib_device_attr              attr;
434         int                                ret;
435
436         if (out_len < sizeof resp)
437                 return -ENOSPC;
438
439         if (copy_from_user(&cmd, buf, sizeof cmd))
440                 return -EFAULT;
441
442         ret = ib_query_device(file->device->ib_dev, &attr);
443         if (ret)
444                 return ret;
445
446         memset(&resp, 0, sizeof resp);
447         copy_query_dev_fields(file, &resp, &attr);
448
449         if (copy_to_user((void __user *) (unsigned long) cmd.response,
450                          &resp, sizeof resp))
451                 return -EFAULT;
452
453         return in_len;
454 }
455
456 ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
457                              const char __user *buf,
458                              int in_len, int out_len)
459 {
460         struct ib_uverbs_query_port      cmd;
461         struct ib_uverbs_query_port_resp resp;
462         struct ib_port_attr              attr;
463         int                              ret;
464
465         if (out_len < sizeof resp)
466                 return -ENOSPC;
467
468         if (copy_from_user(&cmd, buf, sizeof cmd))
469                 return -EFAULT;
470
471         ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
472         if (ret)
473                 return ret;
474
475         memset(&resp, 0, sizeof resp);
476
477         resp.state           = attr.state;
478         resp.max_mtu         = attr.max_mtu;
479         resp.active_mtu      = attr.active_mtu;
480         resp.gid_tbl_len     = attr.gid_tbl_len;
481         resp.port_cap_flags  = attr.port_cap_flags;
482         resp.max_msg_sz      = attr.max_msg_sz;
483         resp.bad_pkey_cntr   = attr.bad_pkey_cntr;
484         resp.qkey_viol_cntr  = attr.qkey_viol_cntr;
485         resp.pkey_tbl_len    = attr.pkey_tbl_len;
486         resp.lid             = attr.lid;
487         resp.sm_lid          = attr.sm_lid;
488         resp.lmc             = attr.lmc;
489         resp.max_vl_num      = attr.max_vl_num;
490         resp.sm_sl           = attr.sm_sl;
491         resp.subnet_timeout  = attr.subnet_timeout;
492         resp.init_type_reply = attr.init_type_reply;
493         resp.active_width    = attr.active_width;
494         resp.active_speed    = attr.active_speed;
495         resp.phys_state      = attr.phys_state;
496         resp.link_layer      = rdma_port_get_link_layer(file->device->ib_dev,
497                                                         cmd.port_num);
498
499         if (copy_to_user((void __user *) (unsigned long) cmd.response,
500                          &resp, sizeof resp))
501                 return -EFAULT;
502
503         return in_len;
504 }
505
506 ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
507                            const char __user *buf,
508                            int in_len, int out_len)
509 {
510         struct ib_uverbs_alloc_pd      cmd;
511         struct ib_uverbs_alloc_pd_resp resp;
512         struct ib_udata                udata;
513         struct ib_uobject             *uobj;
514         struct ib_pd                  *pd;
515         int                            ret;
516
517         if (out_len < sizeof resp)
518                 return -ENOSPC;
519
520         if (copy_from_user(&cmd, buf, sizeof cmd))
521                 return -EFAULT;
522
523         INIT_UDATA(&udata, buf + sizeof cmd,
524                    (unsigned long) cmd.response + sizeof resp,
525                    in_len - sizeof cmd, out_len - sizeof resp);
526
527         uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
528         if (!uobj)
529                 return -ENOMEM;
530
531         init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
532         down_write(&uobj->mutex);
533
534         pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
535                                             file->ucontext, &udata);
536         if (IS_ERR(pd)) {
537                 ret = PTR_ERR(pd);
538                 goto err;
539         }
540
541         pd->device  = file->device->ib_dev;
542         pd->uobject = uobj;
543         atomic_set(&pd->usecnt, 0);
544
545         uobj->object = pd;
546         ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
547         if (ret)
548                 goto err_idr;
549
550         memset(&resp, 0, sizeof resp);
551         resp.pd_handle = uobj->id;
552
553         if (copy_to_user((void __user *) (unsigned long) cmd.response,
554                          &resp, sizeof resp)) {
555                 ret = -EFAULT;
556                 goto err_copy;
557         }
558
559         mutex_lock(&file->mutex);
560         list_add_tail(&uobj->list, &file->ucontext->pd_list);
561         mutex_unlock(&file->mutex);
562
563         uobj->live = 1;
564
565         up_write(&uobj->mutex);
566
567         return in_len;
568
569 err_copy:
570         idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
571
572 err_idr:
573         ib_dealloc_pd(pd);
574
575 err:
576         put_uobj_write(uobj);
577         return ret;
578 }
579
580 ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
581                              const char __user *buf,
582                              int in_len, int out_len)
583 {
584         struct ib_uverbs_dealloc_pd cmd;
585         struct ib_uobject          *uobj;
586         int                         ret;
587
588         if (copy_from_user(&cmd, buf, sizeof cmd))
589                 return -EFAULT;
590
591         uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
592         if (!uobj)
593                 return -EINVAL;
594
595         ret = ib_dealloc_pd(uobj->object);
596         if (!ret)
597                 uobj->live = 0;
598
599         put_uobj_write(uobj);
600
601         if (ret)
602                 return ret;
603
604         idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
605
606         mutex_lock(&file->mutex);
607         list_del(&uobj->list);
608         mutex_unlock(&file->mutex);
609
610         put_uobj(uobj);
611
612         return in_len;
613 }
614
615 struct xrcd_table_entry {
616         struct rb_node  node;
617         struct ib_xrcd *xrcd;
618         struct inode   *inode;
619 };
620
621 static int xrcd_table_insert(struct ib_uverbs_device *dev,
622                             struct inode *inode,
623                             struct ib_xrcd *xrcd)
624 {
625         struct xrcd_table_entry *entry, *scan;
626         struct rb_node **p = &dev->xrcd_tree.rb_node;
627         struct rb_node *parent = NULL;
628
629         entry = kmalloc(sizeof *entry, GFP_KERNEL);
630         if (!entry)
631                 return -ENOMEM;
632
633         entry->xrcd  = xrcd;
634         entry->inode = inode;
635
636         while (*p) {
637                 parent = *p;
638                 scan = rb_entry(parent, struct xrcd_table_entry, node);
639
640                 if (inode < scan->inode) {
641                         p = &(*p)->rb_left;
642                 } else if (inode > scan->inode) {
643                         p = &(*p)->rb_right;
644                 } else {
645                         kfree(entry);
646                         return -EEXIST;
647                 }
648         }
649
650         rb_link_node(&entry->node, parent, p);
651         rb_insert_color(&entry->node, &dev->xrcd_tree);
652         igrab(inode);
653         return 0;
654 }
655
656 static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
657                                                   struct inode *inode)
658 {
659         struct xrcd_table_entry *entry;
660         struct rb_node *p = dev->xrcd_tree.rb_node;
661
662         while (p) {
663                 entry = rb_entry(p, struct xrcd_table_entry, node);
664
665                 if (inode < entry->inode)
666                         p = p->rb_left;
667                 else if (inode > entry->inode)
668                         p = p->rb_right;
669                 else
670                         return entry;
671         }
672
673         return NULL;
674 }
675
676 static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
677 {
678         struct xrcd_table_entry *entry;
679
680         entry = xrcd_table_search(dev, inode);
681         if (!entry)
682                 return NULL;
683
684         return entry->xrcd;
685 }
686
687 static void xrcd_table_delete(struct ib_uverbs_device *dev,
688                               struct inode *inode)
689 {
690         struct xrcd_table_entry *entry;
691
692         entry = xrcd_table_search(dev, inode);
693         if (entry) {
694                 iput(inode);
695                 rb_erase(&entry->node, &dev->xrcd_tree);
696                 kfree(entry);
697         }
698 }
699
700 ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
701                             const char __user *buf, int in_len,
702                             int out_len)
703 {
704         struct ib_uverbs_open_xrcd      cmd;
705         struct ib_uverbs_open_xrcd_resp resp;
706         struct ib_udata                 udata;
707         struct ib_uxrcd_object         *obj;
708         struct ib_xrcd                 *xrcd = NULL;
709         struct fd                       f = {NULL, 0};
710         struct inode                   *inode = NULL;
711         int                             ret = 0;
712         int                             new_xrcd = 0;
713
714         if (out_len < sizeof resp)
715                 return -ENOSPC;
716
717         if (copy_from_user(&cmd, buf, sizeof cmd))
718                 return -EFAULT;
719
720         INIT_UDATA(&udata, buf + sizeof cmd,
721                    (unsigned long) cmd.response + sizeof resp,
722                    in_len - sizeof cmd, out_len - sizeof resp);
723
724         mutex_lock(&file->device->xrcd_tree_mutex);
725
726         if (cmd.fd != -1) {
727                 /* search for file descriptor */
728                 f = fdget(cmd.fd);
729                 if (!f.file) {
730                         ret = -EBADF;
731                         goto err_tree_mutex_unlock;
732                 }
733
734                 inode = file_inode(f.file);
735                 xrcd = find_xrcd(file->device, inode);
736                 if (!xrcd && !(cmd.oflags & O_CREAT)) {
737                         /* no XRCD for this inode yet; O_CREAT flag required */
738                         ret = -EAGAIN;
739                         goto err_tree_mutex_unlock;
740                 }
741
742                 if (xrcd && cmd.oflags & O_EXCL) {
743                         ret = -EINVAL;
744                         goto err_tree_mutex_unlock;
745                 }
746         }
747
748         obj = kmalloc(sizeof *obj, GFP_KERNEL);
749         if (!obj) {
750                 ret = -ENOMEM;
751                 goto err_tree_mutex_unlock;
752         }
753
754         init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);
755
756         down_write(&obj->uobject.mutex);
757
758         if (!xrcd) {
759                 xrcd = file->device->ib_dev->alloc_xrcd(file->device->ib_dev,
760                                                         file->ucontext, &udata);
761                 if (IS_ERR(xrcd)) {
762                         ret = PTR_ERR(xrcd);
763                         goto err;
764                 }
765
766                 xrcd->inode   = inode;
767                 xrcd->device  = file->device->ib_dev;
768                 atomic_set(&xrcd->usecnt, 0);
769                 mutex_init(&xrcd->tgt_qp_mutex);
770                 INIT_LIST_HEAD(&xrcd->tgt_qp_list);
771                 new_xrcd = 1;
772         }
773
774         atomic_set(&obj->refcnt, 0);
775         obj->uobject.object = xrcd;
776         ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
777         if (ret)
778                 goto err_idr;
779
780         memset(&resp, 0, sizeof resp);
781         resp.xrcd_handle = obj->uobject.id;
782
783         if (inode) {
784                 if (new_xrcd) {
785                         /* create new inode/xrcd table entry */
786                         ret = xrcd_table_insert(file->device, inode, xrcd);
787                         if (ret)
788                                 goto err_insert_xrcd;
789                 }
790                 atomic_inc(&xrcd->usecnt);
791         }
792
793         if (copy_to_user((void __user *) (unsigned long) cmd.response,
794                          &resp, sizeof resp)) {
795                 ret = -EFAULT;
796                 goto err_copy;
797         }
798
799         if (f.file)
800                 fdput(f);
801
802         mutex_lock(&file->mutex);
803         list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
804         mutex_unlock(&file->mutex);
805
806         obj->uobject.live = 1;
807         up_write(&obj->uobject.mutex);
808
809         mutex_unlock(&file->device->xrcd_tree_mutex);
810         return in_len;
811
812 err_copy:
813         if (inode) {
814                 if (new_xrcd)
815                         xrcd_table_delete(file->device, inode);
816                 atomic_dec(&xrcd->usecnt);
817         }
818
819 err_insert_xrcd:
820         idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
821
822 err_idr:
823         ib_dealloc_xrcd(xrcd);
824
825 err:
826         put_uobj_write(&obj->uobject);
827
828 err_tree_mutex_unlock:
829         if (f.file)
830                 fdput(f);
831
832         mutex_unlock(&file->device->xrcd_tree_mutex);
833
834         return ret;
835 }
836
837 ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
838                              const char __user *buf, int in_len,
839                              int out_len)
840 {
841         struct ib_uverbs_close_xrcd cmd;
842         struct ib_uobject           *uobj;
843         struct ib_xrcd              *xrcd = NULL;
844         struct inode                *inode = NULL;
845         struct ib_uxrcd_object      *obj;
846         int                         live;
847         int                         ret = 0;
848
849         if (copy_from_user(&cmd, buf, sizeof cmd))
850                 return -EFAULT;
851
852         mutex_lock(&file->device->xrcd_tree_mutex);
853         uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
854         if (!uobj) {
855                 ret = -EINVAL;
856                 goto out;
857         }
858
859         xrcd  = uobj->object;
860         inode = xrcd->inode;
861         obj   = container_of(uobj, struct ib_uxrcd_object, uobject);
862         if (atomic_read(&obj->refcnt)) {
863                 put_uobj_write(uobj);
864                 ret = -EBUSY;
865                 goto out;
866         }
867
868         if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
869                 ret = ib_dealloc_xrcd(uobj->object);
870                 if (!ret)
871                         uobj->live = 0;
872         }
873
874         live = uobj->live;
875         if (inode && ret)
876                 atomic_inc(&xrcd->usecnt);
877
878         put_uobj_write(uobj);
879
880         if (ret)
881                 goto out;
882
883         if (inode && !live)
884                 xrcd_table_delete(file->device, inode);
885
886         idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
887         mutex_lock(&file->mutex);
888         list_del(&uobj->list);
889         mutex_unlock(&file->mutex);
890
891         put_uobj(uobj);
892         ret = in_len;
893
894 out:
895         mutex_unlock(&file->device->xrcd_tree_mutex);
896         return ret;
897 }
898
899 void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
900                             struct ib_xrcd *xrcd)
901 {
902         struct inode *inode;
903
904         inode = xrcd->inode;
905         if (inode && !atomic_dec_and_test(&xrcd->usecnt))
906                 return;
907
908         ib_dealloc_xrcd(xrcd);
909
910         if (inode)
911                 xrcd_table_delete(dev, inode);
912 }
913
914 ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
915                          const char __user *buf, int in_len,
916                          int out_len)
917 {
918         struct ib_uverbs_reg_mr      cmd;
919         struct ib_uverbs_reg_mr_resp resp;
920         struct ib_udata              udata;
921         struct ib_uobject           *uobj;
922         struct ib_pd                *pd;
923         struct ib_mr                *mr;
924         int                          ret;
925
926         if (out_len < sizeof resp)
927                 return -ENOSPC;
928
929         if (copy_from_user(&cmd, buf, sizeof cmd))
930                 return -EFAULT;
931
932         INIT_UDATA(&udata, buf + sizeof cmd,
933                    (unsigned long) cmd.response + sizeof resp,
934                    in_len - sizeof cmd, out_len - sizeof resp);
935
936         if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
937                 return -EINVAL;
938
939         ret = ib_check_mr_access(cmd.access_flags);
940         if (ret)
941                 return ret;
942
943         uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
944         if (!uobj)
945                 return -ENOMEM;
946
947         init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
948         down_write(&uobj->mutex);
949
950         pd = idr_read_pd(cmd.pd_handle, file->ucontext);
951         if (!pd) {
952                 ret = -EINVAL;
953                 goto err_free;
954         }
955
956         mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
957                                      cmd.access_flags, &udata);
958         if (IS_ERR(mr)) {
959                 ret = PTR_ERR(mr);
960                 goto err_put;
961         }
962
963         mr->device  = pd->device;
964         mr->pd      = pd;
965         mr->uobject = uobj;
966         atomic_inc(&pd->usecnt);
967         atomic_set(&mr->usecnt, 0);
968
969         uobj->object = mr;
970         ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
971         if (ret)
972                 goto err_unreg;
973
974         memset(&resp, 0, sizeof resp);
975         resp.lkey      = mr->lkey;
976         resp.rkey      = mr->rkey;
977         resp.mr_handle = uobj->id;
978
979         if (copy_to_user((void __user *) (unsigned long) cmd.response,
980                          &resp, sizeof resp)) {
981                 ret = -EFAULT;
982                 goto err_copy;
983         }
984
985         put_pd_read(pd);
986
987         mutex_lock(&file->mutex);
988         list_add_tail(&uobj->list, &file->ucontext->mr_list);
989         mutex_unlock(&file->mutex);
990
991         uobj->live = 1;
992
993         up_write(&uobj->mutex);
994
995         return in_len;
996
997 err_copy:
998         idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
999
1000 err_unreg:
1001         ib_dereg_mr(mr);
1002
1003 err_put:
1004         put_pd_read(pd);
1005
1006 err_free:
1007         put_uobj_write(uobj);
1008         return ret;
1009 }
1010
1011 ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
1012                            const char __user *buf, int in_len,
1013                            int out_len)
1014 {
1015         struct ib_uverbs_rereg_mr      cmd;
1016         struct ib_uverbs_rereg_mr_resp resp;
1017         struct ib_udata              udata;
1018         struct ib_pd                *pd = NULL;
1019         struct ib_mr                *mr;
1020         struct ib_pd                *old_pd;
1021         int                          ret;
1022         struct ib_uobject           *uobj;
1023
1024         if (out_len < sizeof(resp))
1025                 return -ENOSPC;
1026
1027         if (copy_from_user(&cmd, buf, sizeof(cmd)))
1028                 return -EFAULT;
1029
1030         INIT_UDATA(&udata, buf + sizeof(cmd),
1031                    (unsigned long) cmd.response + sizeof(resp),
1032                    in_len - sizeof(cmd), out_len - sizeof(resp));
1033
1034         if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
1035                 return -EINVAL;
1036
1037         if ((cmd.flags & IB_MR_REREG_TRANS) &&
1038             (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
1039              (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
1040                         return -EINVAL;
1041
1042         uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle,
1043                               file->ucontext);
1044
1045         if (!uobj)
1046                 return -EINVAL;
1047
1048         mr = uobj->object;
1049
1050         if (cmd.flags & IB_MR_REREG_ACCESS) {
1051                 ret = ib_check_mr_access(cmd.access_flags);
1052                 if (ret)
1053                         goto put_uobjs;
1054         }
1055
1056         if (cmd.flags & IB_MR_REREG_PD) {
1057                 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
1058                 if (!pd) {
1059                         ret = -EINVAL;
1060                         goto put_uobjs;
1061                 }
1062         }
1063
1064         if (atomic_read(&mr->usecnt)) {
1065                 ret = -EBUSY;
1066                 goto put_uobj_pd;
1067         }
1068
1069         old_pd = mr->pd;
1070         ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
1071                                         cmd.length, cmd.hca_va,
1072                                         cmd.access_flags, pd, &udata);
1073         if (!ret) {
1074                 if (cmd.flags & IB_MR_REREG_PD) {
1075                         atomic_inc(&pd->usecnt);
1076                         mr->pd = pd;
1077                         atomic_dec(&old_pd->usecnt);
1078                 }
1079         } else {
1080                 goto put_uobj_pd;
1081         }
1082
1083         memset(&resp, 0, sizeof(resp));
1084         resp.lkey      = mr->lkey;
1085         resp.rkey      = mr->rkey;
1086
1087         if (copy_to_user((void __user *)(unsigned long)cmd.response,
1088                          &resp, sizeof(resp)))
1089                 ret = -EFAULT;
1090         else
1091                 ret = in_len;
1092
1093 put_uobj_pd:
1094         if (cmd.flags & IB_MR_REREG_PD)
1095                 put_pd_read(pd);
1096
1097 put_uobjs:
1098
1099         put_uobj_write(mr->uobject);
1100
1101         return ret;
1102 }
1103
1104 ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
1105                            const char __user *buf, int in_len,
1106                            int out_len)
1107 {
1108         struct ib_uverbs_dereg_mr cmd;
1109         struct ib_mr             *mr;
1110         struct ib_uobject        *uobj;
1111         int                       ret = -EINVAL;
1112
1113         if (copy_from_user(&cmd, buf, sizeof cmd))
1114                 return -EFAULT;
1115
1116         uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
1117         if (!uobj)
1118                 return -EINVAL;
1119
1120         mr = uobj->object;
1121
1122         ret = ib_dereg_mr(mr);
1123         if (!ret)
1124                 uobj->live = 0;
1125
1126         put_uobj_write(uobj);
1127
1128         if (ret)
1129                 return ret;
1130
1131         idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
1132
1133         mutex_lock(&file->mutex);
1134         list_del(&uobj->list);
1135         mutex_unlock(&file->mutex);
1136
1137         put_uobj(uobj);
1138
1139         return in_len;
1140 }
1141
1142 ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
1143                          const char __user *buf, int in_len,
1144                          int out_len)
1145 {
1146         struct ib_uverbs_alloc_mw      cmd;
1147         struct ib_uverbs_alloc_mw_resp resp;
1148         struct ib_uobject             *uobj;
1149         struct ib_pd                  *pd;
1150         struct ib_mw                  *mw;
1151         int                            ret;
1152
1153         if (out_len < sizeof(resp))
1154                 return -ENOSPC;
1155
1156         if (copy_from_user(&cmd, buf, sizeof(cmd)))
1157                 return -EFAULT;
1158
1159         uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
1160         if (!uobj)
1161                 return -ENOMEM;
1162
1163         init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
1164         down_write(&uobj->mutex);
1165
1166         pd = idr_read_pd(cmd.pd_handle, file->ucontext);
1167         if (!pd) {
1168                 ret = -EINVAL;
1169                 goto err_free;
1170         }
1171
1172         mw = pd->device->alloc_mw(pd, cmd.mw_type);
1173         if (IS_ERR(mw)) {
1174                 ret = PTR_ERR(mw);
1175                 goto err_put;
1176         }
1177
1178         mw->device  = pd->device;
1179         mw->pd      = pd;
1180         mw->uobject = uobj;
1181         atomic_inc(&pd->usecnt);
1182
1183         uobj->object = mw;
1184         ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
1185         if (ret)
1186                 goto err_unalloc;
1187
1188         memset(&resp, 0, sizeof(resp));
1189         resp.rkey      = mw->rkey;
1190         resp.mw_handle = uobj->id;
1191
1192         if (copy_to_user((void __user *)(unsigned long)cmd.response,
1193                          &resp, sizeof(resp))) {
1194                 ret = -EFAULT;
1195                 goto err_copy;
1196         }
1197
1198         put_pd_read(pd);
1199
1200         mutex_lock(&file->mutex);
1201         list_add_tail(&uobj->list, &file->ucontext->mw_list);
1202         mutex_unlock(&file->mutex);
1203
1204         uobj->live = 1;
1205
1206         up_write(&uobj->mutex);
1207
1208         return in_len;
1209
1210 err_copy:
1211         idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
1212
1213 err_unalloc:
1214         ib_dealloc_mw(mw);
1215
1216 err_put:
1217         put_pd_read(pd);
1218
1219 err_free:
1220         put_uobj_write(uobj);
1221         return ret;
1222 }
1223
1224 ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
1225                            const char __user *buf, int in_len,
1226                            int out_len)
1227 {
1228         struct ib_uverbs_dealloc_mw cmd;
1229         struct ib_mw               *mw;
1230         struct ib_uobject          *uobj;
1231         int                         ret = -EINVAL;
1232
1233         if (copy_from_user(&cmd, buf, sizeof(cmd)))
1234                 return -EFAULT;
1235
1236         uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
1237         if (!uobj)
1238                 return -EINVAL;
1239
1240         mw = uobj->object;
1241
1242         ret = ib_dealloc_mw(mw);
1243         if (!ret)
1244                 uobj->live = 0;
1245
1246         put_uobj_write(uobj);
1247
1248         if (ret)
1249                 return ret;
1250
1251         idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
1252
1253         mutex_lock(&file->mutex);
1254         list_del(&uobj->list);
1255         mutex_unlock(&file->mutex);
1256
1257         put_uobj(uobj);
1258
1259         return in_len;
1260 }
1261
1262 ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
1263                                       const char __user *buf, int in_len,
1264                                       int out_len)
1265 {
1266         struct ib_uverbs_create_comp_channel       cmd;
1267         struct ib_uverbs_create_comp_channel_resp  resp;
1268         struct file                               *filp;
1269         int ret;
1270
1271         if (out_len < sizeof resp)
1272                 return -ENOSPC;
1273
1274         if (copy_from_user(&cmd, buf, sizeof cmd))
1275                 return -EFAULT;
1276
1277         ret = get_unused_fd_flags(O_CLOEXEC);
1278         if (ret < 0)
1279                 return ret;
1280         resp.fd = ret;
1281
1282         filp = ib_uverbs_alloc_event_file(file, 0);
1283         if (IS_ERR(filp)) {
1284                 put_unused_fd(resp.fd);
1285                 return PTR_ERR(filp);
1286         }
1287
1288         if (copy_to_user((void __user *) (unsigned long) cmd.response,
1289                          &resp, sizeof resp)) {
1290                 put_unused_fd(resp.fd);
1291                 fput(filp);
1292                 return -EFAULT;
1293         }
1294
1295         fd_install(resp.fd, filp);
1296         return in_len;
1297 }
1298
1299 ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
1300                             const char __user *buf, int in_len,
1301                             int out_len)
1302 {
1303         struct ib_uverbs_create_cq      cmd;
1304         struct ib_uverbs_create_cq_resp resp;
1305         struct ib_udata                 udata;
1306         struct ib_ucq_object           *obj;
1307         struct ib_uverbs_event_file    *ev_file = NULL;
1308         struct ib_cq                   *cq;
1309         int                             ret;
1310
1311         if (out_len < sizeof resp)
1312                 return -ENOSPC;
1313
1314         if (copy_from_user(&cmd, buf, sizeof cmd))
1315                 return -EFAULT;
1316
1317         INIT_UDATA(&udata, buf + sizeof cmd,
1318                    (unsigned long) cmd.response + sizeof resp,
1319                    in_len - sizeof cmd, out_len - sizeof resp);
1320
1321         if (cmd.comp_vector >= file->device->num_comp_vectors)
1322                 return -EINVAL;
1323
1324         obj = kmalloc(sizeof *obj, GFP_KERNEL);
1325         if (!obj)
1326                 return -ENOMEM;
1327
1328         init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_class);
1329         down_write(&obj->uobject.mutex);
1330
1331         if (cmd.comp_channel >= 0) {
1332                 ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
1333                 if (!ev_file) {
1334                         ret = -EINVAL;
1335                         goto err;
1336                 }
1337         }
1338
1339         obj->uverbs_file           = file;
1340         obj->comp_events_reported  = 0;
1341         obj->async_events_reported = 0;
1342         INIT_LIST_HEAD(&obj->comp_list);
1343         INIT_LIST_HEAD(&obj->async_list);
1344
1345         cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
1346                                              cmd.comp_vector,
1347                                              file->ucontext, &udata);
1348         if (IS_ERR(cq)) {
1349                 ret = PTR_ERR(cq);
1350                 goto err_file;
1351         }
1352
1353         cq->device        = file->device->ib_dev;
1354         cq->uobject       = &obj->uobject;
1355         cq->comp_handler  = ib_uverbs_comp_handler;
1356         cq->event_handler = ib_uverbs_cq_event_handler;
1357         cq->cq_context    = ev_file;
1358         atomic_set(&cq->usecnt, 0);
1359
1360         obj->uobject.object = cq;
1361         ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
1362         if (ret)
1363                 goto err_free;
1364
1365         memset(&resp, 0, sizeof resp);
1366         resp.cq_handle = obj->uobject.id;
1367         resp.cqe       = cq->cqe;
1368
1369         if (copy_to_user((void __user *) (unsigned long) cmd.response,
1370                          &resp, sizeof resp)) {
1371                 ret = -EFAULT;
1372                 goto err_copy;
1373         }
1374
1375         mutex_lock(&file->mutex);
1376         list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
1377         mutex_unlock(&file->mutex);
1378
1379         obj->uobject.live = 1;
1380
1381         up_write(&obj->uobject.mutex);
1382
1383         return in_len;
1384
1385 err_copy:
1386         idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);
1387
1388 err_free:
1389         ib_destroy_cq(cq);
1390
1391 err_file:
1392         if (ev_file)
1393                 ib_uverbs_release_ucq(file, ev_file, obj);
1394
1395 err:
1396         put_uobj_write(&obj->uobject);
1397         return ret;
1398 }
1399
1400 ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
1401                             const char __user *buf, int in_len,
1402                             int out_len)
1403 {
1404         struct ib_uverbs_resize_cq      cmd;
1405         struct ib_uverbs_resize_cq_resp resp;
1406         struct ib_udata                 udata;
1407         struct ib_cq                    *cq;
1408         int                             ret = -EINVAL;
1409
1410         if (copy_from_user(&cmd, buf, sizeof cmd))
1411                 return -EFAULT;
1412
1413         INIT_UDATA(&udata, buf + sizeof cmd,
1414                    (unsigned long) cmd.response + sizeof resp,
1415                    in_len - sizeof cmd, out_len - sizeof resp);
1416
1417         cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
1418         if (!cq)
1419                 return -EINVAL;
1420
1421         ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
1422         if (ret)
1423                 goto out;
1424
1425         resp.cqe = cq->cqe;
1426
1427         if (copy_to_user((void __user *) (unsigned long) cmd.response,
1428                          &resp, sizeof resp.cqe))
1429                 ret = -EFAULT;
1430
1431 out:
1432         put_cq_read(cq);
1433
1434         return ret ? ret : in_len;
1435 }
1436
1437 static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
1438 {
1439         struct ib_uverbs_wc tmp;
1440
1441         tmp.wr_id               = wc->wr_id;
1442         tmp.status              = wc->status;
1443         tmp.opcode              = wc->opcode;
1444         tmp.vendor_err          = wc->vendor_err;
1445         tmp.byte_len            = wc->byte_len;
1446         tmp.ex.imm_data         = (__u32 __force) wc->ex.imm_data;
1447         tmp.qp_num              = wc->qp->qp_num;
1448         tmp.src_qp              = wc->src_qp;
1449         tmp.wc_flags            = wc->wc_flags;
1450         tmp.pkey_index          = wc->pkey_index;
1451         tmp.slid                = wc->slid;
1452         tmp.sl                  = wc->sl;
1453         tmp.dlid_path_bits      = wc->dlid_path_bits;
1454         tmp.port_num            = wc->port_num;
1455         tmp.reserved            = 0;
1456
1457         if (copy_to_user(dest, &tmp, sizeof tmp))
1458                 return -EFAULT;
1459
1460         return 0;
1461 }
1462
1463 ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
1464                           const char __user *buf, int in_len,
1465                           int out_len)
1466 {
1467         struct ib_uverbs_poll_cq       cmd;
1468         struct ib_uverbs_poll_cq_resp  resp;
1469         u8 __user                     *header_ptr;
1470         u8 __user                     *data_ptr;
1471         struct ib_cq                  *cq;
1472         struct ib_wc                   wc;
1473         int                            ret;
1474
1475         if (copy_from_user(&cmd, buf, sizeof cmd))
1476                 return -EFAULT;
1477
1478         cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
1479         if (!cq)
1480                 return -EINVAL;
1481
1482         /* we copy a struct ib_uverbs_poll_cq_resp to user space */
1483         header_ptr = (void __user *)(unsigned long) cmd.response;
1484         data_ptr = header_ptr + sizeof resp;
1485
1486         memset(&resp, 0, sizeof resp);
1487         while (resp.count < cmd.ne) {
1488                 ret = ib_poll_cq(cq, 1, &wc);
1489                 if (ret < 0)
1490                         goto out_put;
1491                 if (!ret)
1492                         break;
1493
1494                 ret = copy_wc_to_user(data_ptr, &wc);
1495                 if (ret)
1496                         goto out_put;
1497
1498                 data_ptr += sizeof(struct ib_uverbs_wc);
1499                 ++resp.count;
1500         }
1501
1502         if (copy_to_user(header_ptr, &resp, sizeof resp)) {
1503                 ret = -EFAULT;
1504                 goto out_put;
1505         }
1506
1507         ret = in_len;
1508
1509 out_put:
1510         put_cq_read(cq);
1511         return ret;
1512 }
1513
1514 ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
1515                                 const char __user *buf, int in_len,
1516                                 int out_len)
1517 {
1518         struct ib_uverbs_req_notify_cq cmd;
1519         struct ib_cq                  *cq;
1520
1521         if (copy_from_user(&cmd, buf, sizeof cmd))
1522                 return -EFAULT;
1523
1524         cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
1525         if (!cq)
1526                 return -EINVAL;
1527
1528         ib_req_notify_cq(cq, cmd.solicited_only ?
1529                          IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
1530
1531         put_cq_read(cq);
1532
1533         return in_len;
1534 }
1535
1536 ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
1537                              const char __user *buf, int in_len,
1538                              int out_len)
1539 {
1540         struct ib_uverbs_destroy_cq      cmd;
1541         struct ib_uverbs_destroy_cq_resp resp;
1542         struct ib_uobject               *uobj;
1543         struct ib_cq                    *cq;
1544         struct ib_ucq_object            *obj;
1545         struct ib_uverbs_event_file     *ev_file;
1546         int                              ret = -EINVAL;
1547
1548         if (copy_from_user(&cmd, buf, sizeof cmd))
1549                 return -EFAULT;
1550
1551         uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
1552         if (!uobj)
1553                 return -EINVAL;
1554         cq      = uobj->object;
1555         ev_file = cq->cq_context;
1556         obj     = container_of(cq->uobject, struct ib_ucq_object, uobject);
1557
1558         ret = ib_destroy_cq(cq);
1559         if (!ret)
1560                 uobj->live = 0;
1561
1562         put_uobj_write(uobj);
1563
1564         if (ret)
1565                 return ret;
1566
1567         idr_remove_uobj(&ib_uverbs_cq_idr, uobj);
1568
1569         mutex_lock(&file->mutex);
1570         list_del(&uobj->list);
1571         mutex_unlock(&file->mutex);
1572
1573         ib_uverbs_release_ucq(file, ev_file, obj);
1574
1575         memset(&resp, 0, sizeof resp);
1576         resp.comp_events_reported  = obj->comp_events_reported;
1577         resp.async_events_reported = obj->async_events_reported;
1578
1579         put_uobj(uobj);
1580
1581         if (copy_to_user((void __user *) (unsigned long) cmd.response,
1582                          &resp, sizeof resp))
1583                 return -EFAULT;
1584
1585         return in_len;
1586 }
1587
1588 ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
1589                             const char __user *buf, int in_len,
1590                             int out_len)
1591 {
1592         struct ib_uverbs_create_qp      cmd;
1593         struct ib_uverbs_create_qp_resp resp;
1594         struct ib_udata                 udata;
1595         struct ib_uqp_object           *obj;
1596         struct ib_device               *device;
1597         struct ib_pd                   *pd = NULL;
1598         struct ib_xrcd                 *xrcd = NULL;
1599         struct ib_uobject              *uninitialized_var(xrcd_uobj);
1600         struct ib_cq                   *scq = NULL, *rcq = NULL;
1601         struct ib_srq                  *srq = NULL;
1602         struct ib_qp                   *qp;
1603         struct ib_qp_init_attr          attr;
1604         int ret;
1605
1606         if (out_len < sizeof resp)
1607                 return -ENOSPC;
1608
1609         if (copy_from_user(&cmd, buf, sizeof cmd))
1610                 return -EFAULT;
1611
1612         if (cmd.qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
1613                 return -EPERM;
1614
1615         INIT_UDATA(&udata, buf + sizeof cmd,
1616                    (unsigned long) cmd.response + sizeof resp,
1617                    in_len - sizeof cmd, out_len - sizeof resp);
1618
1619         obj = kzalloc(sizeof *obj, GFP_KERNEL);
1620         if (!obj)
1621                 return -ENOMEM;
1622
1623         init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
1624         down_write(&obj->uevent.uobject.mutex);
1625
1626         if (cmd.qp_type == IB_QPT_XRC_TGT) {
1627                 xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
1628                 if (!xrcd) {
1629                         ret = -EINVAL;
1630                         goto err_put;
1631                 }
1632                 device = xrcd->device;
1633         } else {
1634                 if (cmd.qp_type == IB_QPT_XRC_INI) {
1635                         cmd.max_recv_wr = cmd.max_recv_sge = 0;
1636                 } else {
1637                         if (cmd.is_srq) {
1638                                 srq = idr_read_srq(cmd.srq_handle, file->ucontext);
1639                                 if (!srq || srq->srq_type != IB_SRQT_BASIC) {
1640                                         ret = -EINVAL;
1641                                         goto err_put;
1642                                 }
1643                         }
1644
1645                         if (cmd.recv_cq_handle != cmd.send_cq_handle) {
1646                                 rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext, 0);
1647                                 if (!rcq) {
1648                                         ret = -EINVAL;
1649                                         goto err_put;
1650                                 }
1651                         }
1652                 }
1653
1654                 scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, !!rcq);
1655                 rcq = rcq ?: scq;
1656                 pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
1657                 if (!pd || !scq) {
1658                         ret = -EINVAL;
1659                         goto err_put;
1660                 }
1661
1662                 device = pd->device;
1663         }
1664
1665         attr.event_handler = ib_uverbs_qp_event_handler;
1666         attr.qp_context    = file;
1667         attr.send_cq       = scq;
1668         attr.recv_cq       = rcq;
1669         attr.srq           = srq;
1670         attr.xrcd          = xrcd;
1671         attr.sq_sig_type   = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
1672         attr.qp_type       = cmd.qp_type;
1673         attr.create_flags  = 0;
1674
1675         attr.cap.max_send_wr     = cmd.max_send_wr;
1676         attr.cap.max_recv_wr     = cmd.max_recv_wr;
1677         attr.cap.max_send_sge    = cmd.max_send_sge;
1678         attr.cap.max_recv_sge    = cmd.max_recv_sge;
1679         attr.cap.max_inline_data = cmd.max_inline_data;
1680
1681         obj->uevent.events_reported     = 0;
1682         INIT_LIST_HEAD(&obj->uevent.event_list);
1683         INIT_LIST_HEAD(&obj->mcast_list);
1684
1685         if (cmd.qp_type == IB_QPT_XRC_TGT)
1686                 qp = ib_create_qp(pd, &attr);
1687         else
1688                 qp = device->create_qp(pd, &attr, &udata);
1689
1690         if (IS_ERR(qp)) {
1691                 ret = PTR_ERR(qp);
1692                 goto err_put;
1693         }
1694
1695         if (cmd.qp_type != IB_QPT_XRC_TGT) {
1696                 qp->real_qp       = qp;
1697                 qp->device        = device;
1698                 qp->pd            = pd;
1699                 qp->send_cq       = attr.send_cq;
1700                 qp->recv_cq       = attr.recv_cq;
1701                 qp->srq           = attr.srq;
1702                 qp->event_handler = attr.event_handler;
1703                 qp->qp_context    = attr.qp_context;
1704                 qp->qp_type       = attr.qp_type;
1705                 atomic_set(&qp->usecnt, 0);
1706                 atomic_inc(&pd->usecnt);
1707                 atomic_inc(&attr.send_cq->usecnt);
1708                 if (attr.recv_cq)
1709                         atomic_inc(&attr.recv_cq->usecnt);
1710                 if (attr.srq)
1711                         atomic_inc(&attr.srq->usecnt);
1712         }
1713         qp->uobject = &obj->uevent.uobject;
1714
1715         obj->uevent.uobject.object = qp;
1716         ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1717         if (ret)
1718                 goto err_destroy;
1719
1720         memset(&resp, 0, sizeof resp);
1721         resp.qpn             = qp->qp_num;
1722         resp.qp_handle       = obj->uevent.uobject.id;
1723         resp.max_recv_sge    = attr.cap.max_recv_sge;
1724         resp.max_send_sge    = attr.cap.max_send_sge;
1725         resp.max_recv_wr     = attr.cap.max_recv_wr;
1726         resp.max_send_wr     = attr.cap.max_send_wr;
1727         resp.max_inline_data = attr.cap.max_inline_data;
1728
1729         if (copy_to_user((void __user *) (unsigned long) cmd.response,
1730                          &resp, sizeof resp)) {
1731                 ret = -EFAULT;
1732                 goto err_copy;
1733         }
1734
1735         if (xrcd) {
1736                 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
1737                                           uobject);
1738                 atomic_inc(&obj->uxrcd->refcnt);
1739                 put_xrcd_read(xrcd_uobj);
1740         }
1741
1742         if (pd)
1743                 put_pd_read(pd);
1744         if (scq)
1745                 put_cq_read(scq);
1746         if (rcq && rcq != scq)
1747                 put_cq_read(rcq);
1748         if (srq)
1749                 put_srq_read(srq);
1750
1751         mutex_lock(&file->mutex);
1752         list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
1753         mutex_unlock(&file->mutex);
1754
1755         obj->uevent.uobject.live = 1;
1756
1757         up_write(&obj->uevent.uobject.mutex);
1758
1759         return in_len;
1760
1761 err_copy:
1762         idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1763
1764 err_destroy:
1765         ib_destroy_qp(qp);
1766
1767 err_put:
1768         if (xrcd)
1769                 put_xrcd_read(xrcd_uobj);
1770         if (pd)
1771                 put_pd_read(pd);
1772         if (scq)
1773                 put_cq_read(scq);
1774         if (rcq && rcq != scq)
1775                 put_cq_read(rcq);
1776         if (srq)
1777                 put_srq_read(srq);
1778
1779         put_uobj_write(&obj->uevent.uobject);
1780         return ret;
1781 }
1782
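/*
 * Open a QP that lives in an XRC domain: look up the XRCD, call
 * ib_open_qp() and hand back a new handle that refers to the shared QP.
 */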
1783 ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
1784                           const char __user *buf, int in_len, int out_len)
1785 {
1786         struct ib_uverbs_open_qp        cmd;
1787         struct ib_uverbs_create_qp_resp resp;
1788         struct ib_udata                 udata;
1789         struct ib_uqp_object           *obj;
1790         struct ib_xrcd                 *xrcd;
1791         struct ib_uobject              *uninitialized_var(xrcd_uobj);
1792         struct ib_qp                   *qp;
1793         struct ib_qp_open_attr          attr;
1794         int ret;
1795
1796         if (out_len < sizeof resp)
1797                 return -ENOSPC;
1798
1799         if (copy_from_user(&cmd, buf, sizeof cmd))
1800                 return -EFAULT;
1801
1802         INIT_UDATA(&udata, buf + sizeof cmd,
1803                    (unsigned long) cmd.response + sizeof resp,
1804                    in_len - sizeof cmd, out_len - sizeof resp);
1805
1806         obj = kmalloc(sizeof *obj, GFP_KERNEL);
1807         if (!obj)
1808                 return -ENOMEM;
1809
1810         init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
1811         down_write(&obj->uevent.uobject.mutex);
1812
1813         xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
1814         if (!xrcd) {
1815                 ret = -EINVAL;
1816                 goto err_put;
1817         }
1818
1819         attr.event_handler = ib_uverbs_qp_event_handler;
1820         attr.qp_context    = file;
1821         attr.qp_num        = cmd.qpn;
1822         attr.qp_type       = cmd.qp_type;
1823
1824         obj->uevent.events_reported = 0;
1825         INIT_LIST_HEAD(&obj->uevent.event_list);
1826         INIT_LIST_HEAD(&obj->mcast_list);
1827
1828         qp = ib_open_qp(xrcd, &attr);
1829         if (IS_ERR(qp)) {
1830                 ret = PTR_ERR(qp);
1831                 goto err_put;
1832         }
1833
1834         qp->uobject = &obj->uevent.uobject;
1835
1836         obj->uevent.uobject.object = qp;
1837         ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1838         if (ret)
1839                 goto err_destroy;
1840
1841         memset(&resp, 0, sizeof resp);
1842         resp.qpn       = qp->qp_num;
1843         resp.qp_handle = obj->uevent.uobject.id;
1844
1845         if (copy_to_user((void __user *) (unsigned long) cmd.response,
1846                          &resp, sizeof resp)) {
1847                 ret = -EFAULT;
1848                 goto err_remove;
1849         }
1850
1851         obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
1852         atomic_inc(&obj->uxrcd->refcnt);
1853         put_xrcd_read(xrcd_uobj);
1854
1855         mutex_lock(&file->mutex);
1856         list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
1857         mutex_unlock(&file->mutex);
1858
1859         obj->uevent.uobject.live = 1;
1860
1861         up_write(&obj->uevent.uobject.mutex);
1862
1863         return in_len;
1864
1865 err_remove:
1866         idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1867
1868 err_destroy:
1869         ib_destroy_qp(qp);
1870
1871 err_put:
1872         put_xrcd_read(xrcd_uobj);
1873         put_uobj_write(&obj->uevent.uobject);
1874         return ret;
1875 }
1876
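/*
 * Query a QP and copy its attributes, including the primary and
 * alternate path (address handle) information, back to userspace.
 */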
1877 ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
1878                            const char __user *buf, int in_len,
1879                            int out_len)
1880 {
1881         struct ib_uverbs_query_qp      cmd;
1882         struct ib_uverbs_query_qp_resp resp;
1883         struct ib_qp                   *qp;
1884         struct ib_qp_attr              *attr;
1885         struct ib_qp_init_attr         *init_attr;
1886         int                            ret;
1887
1888         if (copy_from_user(&cmd, buf, sizeof cmd))
1889                 return -EFAULT;
1890
1891         attr      = kmalloc(sizeof *attr, GFP_KERNEL);
1892         init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
1893         if (!attr || !init_attr) {
1894                 ret = -ENOMEM;
1895                 goto out;
1896         }
1897
1898         qp = idr_read_qp(cmd.qp_handle, file->ucontext);
1899         if (!qp) {
1900                 ret = -EINVAL;
1901                 goto out;
1902         }
1903
1904         ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);
1905
1906         put_qp_read(qp);
1907
1908         if (ret)
1909                 goto out;
1910
1911         memset(&resp, 0, sizeof resp);
1912
1913         resp.qp_state               = attr->qp_state;
1914         resp.cur_qp_state           = attr->cur_qp_state;
1915         resp.path_mtu               = attr->path_mtu;
1916         resp.path_mig_state         = attr->path_mig_state;
1917         resp.qkey                   = attr->qkey;
1918         resp.rq_psn                 = attr->rq_psn;
1919         resp.sq_psn                 = attr->sq_psn;
1920         resp.dest_qp_num            = attr->dest_qp_num;
1921         resp.qp_access_flags        = attr->qp_access_flags;
1922         resp.pkey_index             = attr->pkey_index;
1923         resp.alt_pkey_index         = attr->alt_pkey_index;
1924         resp.sq_draining            = attr->sq_draining;
1925         resp.max_rd_atomic          = attr->max_rd_atomic;
1926         resp.max_dest_rd_atomic     = attr->max_dest_rd_atomic;
1927         resp.min_rnr_timer          = attr->min_rnr_timer;
1928         resp.port_num               = attr->port_num;
1929         resp.timeout                = attr->timeout;
1930         resp.retry_cnt              = attr->retry_cnt;
1931         resp.rnr_retry              = attr->rnr_retry;
1932         resp.alt_port_num           = attr->alt_port_num;
1933         resp.alt_timeout            = attr->alt_timeout;
1934
1935         memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
1936         resp.dest.flow_label        = attr->ah_attr.grh.flow_label;
1937         resp.dest.sgid_index        = attr->ah_attr.grh.sgid_index;
1938         resp.dest.hop_limit         = attr->ah_attr.grh.hop_limit;
1939         resp.dest.traffic_class     = attr->ah_attr.grh.traffic_class;
1940         resp.dest.dlid              = attr->ah_attr.dlid;
1941         resp.dest.sl                = attr->ah_attr.sl;
1942         resp.dest.src_path_bits     = attr->ah_attr.src_path_bits;
1943         resp.dest.static_rate       = attr->ah_attr.static_rate;
1944         resp.dest.is_global         = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
1945         resp.dest.port_num          = attr->ah_attr.port_num;
1946
1947         memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
1948         resp.alt_dest.flow_label    = attr->alt_ah_attr.grh.flow_label;
1949         resp.alt_dest.sgid_index    = attr->alt_ah_attr.grh.sgid_index;
1950         resp.alt_dest.hop_limit     = attr->alt_ah_attr.grh.hop_limit;
1951         resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
1952         resp.alt_dest.dlid          = attr->alt_ah_attr.dlid;
1953         resp.alt_dest.sl            = attr->alt_ah_attr.sl;
1954         resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
1955         resp.alt_dest.static_rate   = attr->alt_ah_attr.static_rate;
1956         resp.alt_dest.is_global     = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
1957         resp.alt_dest.port_num      = attr->alt_ah_attr.port_num;
1958
1959         resp.max_send_wr            = init_attr->cap.max_send_wr;
1960         resp.max_recv_wr            = init_attr->cap.max_recv_wr;
1961         resp.max_send_sge           = init_attr->cap.max_send_sge;
1962         resp.max_recv_sge           = init_attr->cap.max_recv_sge;
1963         resp.max_inline_data        = init_attr->cap.max_inline_data;
1964         resp.sq_sig_all             = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
1965
1966         if (copy_to_user((void __user *) (unsigned long) cmd.response,
1967                          &resp, sizeof resp))
1968                 ret = -EFAULT;
1969
1970 out:
1971         kfree(attr);
1972         kfree(init_attr);
1973
1974         return ret ? ret : in_len;
1975 }
1976
1977 /* Remove ignored fields set in the attribute mask */
1978 static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
1979 {
1980         switch (qp_type) {
1981         case IB_QPT_XRC_INI:
1982                 return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
1983         case IB_QPT_XRC_TGT:
1984                 return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
1985                                 IB_QP_RNR_RETRY);
1986         default:
1987                 return mask;
1988         }
1989 }
1990
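/*
 * Modify a QP.  The attribute mask is trimmed by modify_qp_mask() for
 * XRC QP types; the owning QP (real_qp == qp) is modified through the
 * device's ->modify_qp() so the trailing udata is passed along, while a
 * shared QP goes through ib_modify_qp() without udata.
 */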
1991 ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
1992                             const char __user *buf, int in_len,
1993                             int out_len)
1994 {
1995         struct ib_uverbs_modify_qp cmd;
1996         struct ib_udata            udata;
1997         struct ib_qp              *qp;
1998         struct ib_qp_attr         *attr;
1999         int                        ret;
2000
2001         if (copy_from_user(&cmd, buf, sizeof cmd))
2002                 return -EFAULT;
2003
2004         INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
2005                    out_len);
2006
2007         attr = kmalloc(sizeof *attr, GFP_KERNEL);
2008         if (!attr)
2009                 return -ENOMEM;
2010
2011         qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2012         if (!qp) {
2013                 ret = -EINVAL;
2014                 goto out;
2015         }
2016
2017         attr->qp_state            = cmd.qp_state;
2018         attr->cur_qp_state        = cmd.cur_qp_state;
2019         attr->path_mtu            = cmd.path_mtu;
2020         attr->path_mig_state      = cmd.path_mig_state;
2021         attr->qkey                = cmd.qkey;
2022         attr->rq_psn              = cmd.rq_psn;
2023         attr->sq_psn              = cmd.sq_psn;
2024         attr->dest_qp_num         = cmd.dest_qp_num;
2025         attr->qp_access_flags     = cmd.qp_access_flags;
2026         attr->pkey_index          = cmd.pkey_index;
2027         attr->alt_pkey_index      = cmd.alt_pkey_index;
2028         attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
2029         attr->max_rd_atomic       = cmd.max_rd_atomic;
2030         attr->max_dest_rd_atomic  = cmd.max_dest_rd_atomic;
2031         attr->min_rnr_timer       = cmd.min_rnr_timer;
2032         attr->port_num            = cmd.port_num;
2033         attr->timeout             = cmd.timeout;
2034         attr->retry_cnt           = cmd.retry_cnt;
2035         attr->rnr_retry           = cmd.rnr_retry;
2036         attr->alt_port_num        = cmd.alt_port_num;
2037         attr->alt_timeout         = cmd.alt_timeout;
2038
2039         memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
2040         attr->ah_attr.grh.flow_label        = cmd.dest.flow_label;
2041         attr->ah_attr.grh.sgid_index        = cmd.dest.sgid_index;
2042         attr->ah_attr.grh.hop_limit         = cmd.dest.hop_limit;
2043         attr->ah_attr.grh.traffic_class     = cmd.dest.traffic_class;
2044         attr->ah_attr.dlid                  = cmd.dest.dlid;
2045         attr->ah_attr.sl                    = cmd.dest.sl;
2046         attr->ah_attr.src_path_bits         = cmd.dest.src_path_bits;
2047         attr->ah_attr.static_rate           = cmd.dest.static_rate;
2048         attr->ah_attr.ah_flags              = cmd.dest.is_global ? IB_AH_GRH : 0;
2049         attr->ah_attr.port_num              = cmd.dest.port_num;
2050
2051         memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
2052         attr->alt_ah_attr.grh.flow_label    = cmd.alt_dest.flow_label;
2053         attr->alt_ah_attr.grh.sgid_index    = cmd.alt_dest.sgid_index;
2054         attr->alt_ah_attr.grh.hop_limit     = cmd.alt_dest.hop_limit;
2055         attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
2056         attr->alt_ah_attr.dlid              = cmd.alt_dest.dlid;
2057         attr->alt_ah_attr.sl                = cmd.alt_dest.sl;
2058         attr->alt_ah_attr.src_path_bits     = cmd.alt_dest.src_path_bits;
2059         attr->alt_ah_attr.static_rate       = cmd.alt_dest.static_rate;
2060         attr->alt_ah_attr.ah_flags          = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
2061         attr->alt_ah_attr.port_num          = cmd.alt_dest.port_num;
2062
2063         if (qp->real_qp == qp) {
2064                 ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
2065                 if (ret)
2066                         goto out;
2067                 ret = qp->device->modify_qp(qp, attr,
2068                         modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
2069         } else {
2070                 ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
2071         }
2072
2073         put_qp_read(qp);
2074
2075         if (ret)
2076                 goto out;
2077
2078         ret = in_len;
2079
2080 out:
2081         kfree(attr);
2082
2083         return ret;
2084 }
2085
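/*
 * Destroy a QP.  Returns -EBUSY while multicast groups are still
 * attached.  On success the uobject is removed from the idr, pending
 * events are released and the count of reported async events is
 * returned to userspace.
 */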
2086 ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
2087                              const char __user *buf, int in_len,
2088                              int out_len)
2089 {
2090         struct ib_uverbs_destroy_qp      cmd;
2091         struct ib_uverbs_destroy_qp_resp resp;
2092         struct ib_uobject               *uobj;
2093         struct ib_qp                    *qp;
2094         struct ib_uqp_object            *obj;
2095         int                              ret = -EINVAL;
2096
2097         if (copy_from_user(&cmd, buf, sizeof cmd))
2098                 return -EFAULT;
2099
2100         memset(&resp, 0, sizeof resp);
2101
2102         uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
2103         if (!uobj)
2104                 return -EINVAL;
2105         qp  = uobj->object;
2106         obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
2107
2108         if (!list_empty(&obj->mcast_list)) {
2109                 put_uobj_write(uobj);
2110                 return -EBUSY;
2111         }
2112
2113         ret = ib_destroy_qp(qp);
2114         if (!ret)
2115                 uobj->live = 0;
2116
2117         put_uobj_write(uobj);
2118
2119         if (ret)
2120                 return ret;
2121
2122         if (obj->uxrcd)
2123                 atomic_dec(&obj->uxrcd->refcnt);
2124
2125         idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
2126
2127         mutex_lock(&file->mutex);
2128         list_del(&uobj->list);
2129         mutex_unlock(&file->mutex);
2130
2131         ib_uverbs_release_uevent(file, &obj->uevent);
2132
2133         resp.events_reported = obj->uevent.events_reported;
2134
2135         put_uobj(uobj);
2136
2137         if (copy_to_user((void __user *) (unsigned long) cmd.response,
2138                          &resp, sizeof resp))
2139                 return -EFAULT;
2140
2141         return in_len;
2142 }
2143
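/*
 * Post send work requests.  Each user WR is copied in, translated into
 * an ib_send_wr (resolving the AH handle for UD QPs and the opcode-
 * specific rdma/atomic fields otherwise) and chained before being
 * handed to the device's ->post_send().  On failure resp.bad_wr is the
 * 1-based index of the WR that could not be posted.
 */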
2144 ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2145                             const char __user *buf, int in_len,
2146                             int out_len)
2147 {
2148         struct ib_uverbs_post_send      cmd;
2149         struct ib_uverbs_post_send_resp resp;
2150         struct ib_uverbs_send_wr       *user_wr;
2151         struct ib_send_wr              *wr = NULL, *last, *next, *bad_wr;
2152         struct ib_qp                   *qp;
2153         int                             i, sg_ind;
2154         int                             is_ud;
2155         ssize_t                         ret = -EINVAL;
2156
2157         if (copy_from_user(&cmd, buf, sizeof cmd))
2158                 return -EFAULT;
2159
2160         if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
2161             cmd.sge_count * sizeof (struct ib_uverbs_sge))
2162                 return -EINVAL;
2163
2164         if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
2165                 return -EINVAL;
2166
2167         user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
2168         if (!user_wr)
2169                 return -ENOMEM;
2170
2171         qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2172         if (!qp)
2173                 goto out;
2174
2175         is_ud = qp->qp_type == IB_QPT_UD;
2176         sg_ind = 0;
2177         last = NULL;
2178         for (i = 0; i < cmd.wr_count; ++i) {
2179                 if (copy_from_user(user_wr,
2180                                    buf + sizeof cmd + i * cmd.wqe_size,
2181                                    cmd.wqe_size)) {
2182                         ret = -EFAULT;
2183                         goto out_put;
2184                 }
2185
2186                 if (user_wr->num_sge + sg_ind > cmd.sge_count) {
2187                         ret = -EINVAL;
2188                         goto out_put;
2189                 }
2190
2191                 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
2192                                user_wr->num_sge * sizeof (struct ib_sge),
2193                                GFP_KERNEL);
2194                 if (!next) {
2195                         ret = -ENOMEM;
2196                         goto out_put;
2197                 }
2198
2199                 if (!last)
2200                         wr = next;
2201                 else
2202                         last->next = next;
2203                 last = next;
2204
2205                 next->next       = NULL;
2206                 next->wr_id      = user_wr->wr_id;
2207                 next->num_sge    = user_wr->num_sge;
2208                 next->opcode     = user_wr->opcode;
2209                 next->send_flags = user_wr->send_flags;
2210
2211                 if (is_ud) {
2212                         next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
2213                                                      file->ucontext);
2214                         if (!next->wr.ud.ah) {
2215                                 ret = -EINVAL;
2216                                 goto out_put;
2217                         }
2218                         next->wr.ud.remote_qpn  = user_wr->wr.ud.remote_qpn;
2219                         next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
2220                         if (next->opcode == IB_WR_SEND_WITH_IMM)
2221                                 next->ex.imm_data =
2222                                         (__be32 __force) user_wr->ex.imm_data;
2223                 } else {
2224                         switch (next->opcode) {
2225                         case IB_WR_RDMA_WRITE_WITH_IMM:
2226                                 next->ex.imm_data =
2227                                         (__be32 __force) user_wr->ex.imm_data;
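                                /* fall through - WRITE_WITH_IMM also takes the rdma fields below */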
2228                         case IB_WR_RDMA_WRITE:
2229                         case IB_WR_RDMA_READ:
2230                                 next->wr.rdma.remote_addr =
2231                                         user_wr->wr.rdma.remote_addr;
2232                                 next->wr.rdma.rkey        =
2233                                         user_wr->wr.rdma.rkey;
2234                                 break;
2235                         case IB_WR_SEND_WITH_IMM:
2236                                 next->ex.imm_data =
2237                                         (__be32 __force) user_wr->ex.imm_data;
2238                                 break;
2239                         case IB_WR_SEND_WITH_INV:
2240                                 next->ex.invalidate_rkey =
2241                                         user_wr->ex.invalidate_rkey;
2242                                 break;
2243                         case IB_WR_ATOMIC_CMP_AND_SWP:
2244                         case IB_WR_ATOMIC_FETCH_AND_ADD:
2245                                 next->wr.atomic.remote_addr =
2246                                         user_wr->wr.atomic.remote_addr;
2247                                 next->wr.atomic.compare_add =
2248                                         user_wr->wr.atomic.compare_add;
2249                                 next->wr.atomic.swap = user_wr->wr.atomic.swap;
2250                                 next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
2251                                 break;
2252                         default:
2253                                 break;
2254                         }
2255                 }
2256
2257                 if (next->num_sge) {
2258                         next->sg_list = (void *) next +
2259                                 ALIGN(sizeof *next, sizeof (struct ib_sge));
2260                         if (copy_from_user(next->sg_list,
2261                                            buf + sizeof cmd +
2262                                            cmd.wr_count * cmd.wqe_size +
2263                                            sg_ind * sizeof (struct ib_sge),
2264                                            next->num_sge * sizeof (struct ib_sge))) {
2265                                 ret = -EFAULT;
2266                                 goto out_put;
2267                         }
2268                         sg_ind += next->num_sge;
2269                 } else
2270                         next->sg_list = NULL;
2271         }
2272
2273         resp.bad_wr = 0;
2274         ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
2275         if (ret)
2276                 for (next = wr; next; next = next->next) {
2277                         ++resp.bad_wr;
2278                         if (next == bad_wr)
2279                                 break;
2280                 }
2281
2282         if (copy_to_user((void __user *) (unsigned long) cmd.response,
2283                          &resp, sizeof resp))
2284                 ret = -EFAULT;
2285
2286 out_put:
2287         put_qp_read(qp);
2288
2289         while (wr) {
2290                 if (is_ud && wr->wr.ud.ah)
2291                         put_ah_read(wr->wr.ud.ah);
2292                 next = wr->next;
2293                 kfree(wr);
2294                 wr = next;
2295         }
2296
2297 out:
2298         kfree(user_wr);
2299
2300         return ret ? ret : in_len;
2301 }
2302
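/*
 * Copy a chain of receive work requests from userspace and build a
 * linked list of ib_recv_wr, placing each WR's scatter/gather list in
 * the same allocation right after the WR.  Returns the list head or an
 * ERR_PTR on failure.
 */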
2303 static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
2304                                                     int in_len,
2305                                                     u32 wr_count,
2306                                                     u32 sge_count,
2307                                                     u32 wqe_size)
2308 {
2309         struct ib_uverbs_recv_wr *user_wr;
2310         struct ib_recv_wr        *wr = NULL, *last, *next;
2311         int                       sg_ind;
2312         int                       i;
2313         int                       ret;
2314
2315         if (in_len < wqe_size * wr_count +
2316             sge_count * sizeof (struct ib_uverbs_sge))
2317                 return ERR_PTR(-EINVAL);
2318
2319         if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
2320                 return ERR_PTR(-EINVAL);
2321
2322         user_wr = kmalloc(wqe_size, GFP_KERNEL);
2323         if (!user_wr)
2324                 return ERR_PTR(-ENOMEM);
2325
2326         sg_ind = 0;
2327         last = NULL;
2328         for (i = 0; i < wr_count; ++i) {
2329                 if (copy_from_user(user_wr, buf + i * wqe_size,
2330                                    wqe_size)) {
2331                         ret = -EFAULT;
2332                         goto err;
2333                 }
2334
2335                 if (user_wr->num_sge + sg_ind > sge_count) {
2336                         ret = -EINVAL;
2337                         goto err;
2338                 }
2339
2340                 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
2341                                user_wr->num_sge * sizeof (struct ib_sge),
2342                                GFP_KERNEL);
2343                 if (!next) {
2344                         ret = -ENOMEM;
2345                         goto err;
2346                 }
2347
2348                 if (!last)
2349                         wr = next;
2350                 else
2351                         last->next = next;
2352                 last = next;
2353
2354                 next->next       = NULL;
2355                 next->wr_id      = user_wr->wr_id;
2356                 next->num_sge    = user_wr->num_sge;
2357
2358                 if (next->num_sge) {
2359                         next->sg_list = (void *) next +
2360                                 ALIGN(sizeof *next, sizeof (struct ib_sge));
2361                         if (copy_from_user(next->sg_list,
2362                                            buf + wr_count * wqe_size +
2363                                            sg_ind * sizeof (struct ib_sge),
2364                                            next->num_sge * sizeof (struct ib_sge))) {
2365                                 ret = -EFAULT;
2366                                 goto err;
2367                         }
2368                         sg_ind += next->num_sge;
2369                 } else
2370                         next->sg_list = NULL;
2371         }
2372
2373         kfree(user_wr);
2374         return wr;
2375
2376 err:
2377         kfree(user_wr);
2378
2379         while (wr) {
2380                 next = wr->next;
2381                 kfree(wr);
2382                 wr = next;
2383         }
2384
2385         return ERR_PTR(ret);
2386 }
2387
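/*
 * Post receive work requests to a QP using the list built by
 * ib_uverbs_unmarshall_recv(); resp.bad_wr reports the failing WR as in
 * ib_uverbs_post_send().
 */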
2388 ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
2389                             const char __user *buf, int in_len,
2390                             int out_len)
2391 {
2392         struct ib_uverbs_post_recv      cmd;
2393         struct ib_uverbs_post_recv_resp resp;
2394         struct ib_recv_wr              *wr, *next, *bad_wr;
2395         struct ib_qp                   *qp;
2396         ssize_t                         ret = -EINVAL;
2397
2398         if (copy_from_user(&cmd, buf, sizeof cmd))
2399                 return -EFAULT;
2400
2401         wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2402                                        in_len - sizeof cmd, cmd.wr_count,
2403                                        cmd.sge_count, cmd.wqe_size);
2404         if (IS_ERR(wr))
2405                 return PTR_ERR(wr);
2406
2407         qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2408         if (!qp)
2409                 goto out;
2410
2411         resp.bad_wr = 0;
2412         ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);
2413
2414         put_qp_read(qp);
2415
2416         if (ret)
2417                 for (next = wr; next; next = next->next) {
2418                         ++resp.bad_wr;
2419                         if (next == bad_wr)
2420                                 break;
2421                 }
2422
2423         if (copy_to_user((void __user *) (unsigned long) cmd.response,
2424                          &resp, sizeof resp))
2425                 ret = -EFAULT;
2426
2427 out:
2428         while (wr) {
2429                 next = wr->next;
2430                 kfree(wr);
2431                 wr = next;
2432         }
2433
2434         return ret ? ret : in_len;
2435 }
2436
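/*
 * Post receive work requests to an SRQ; unmarshalling and bad_wr
 * reporting work exactly as in ib_uverbs_post_recv().
 */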
2437 ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
2438                                 const char __user *buf, int in_len,
2439                                 int out_len)
2440 {
2441         struct ib_uverbs_post_srq_recv      cmd;
2442         struct ib_uverbs_post_srq_recv_resp resp;
2443         struct ib_recv_wr                  *wr, *next, *bad_wr;
2444         struct ib_srq                      *srq;
2445         ssize_t                             ret = -EINVAL;
2446
2447         if (copy_from_user(&cmd, buf, sizeof cmd))
2448                 return -EFAULT;
2449
2450         wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2451                                        in_len - sizeof cmd, cmd.wr_count,
2452                                        cmd.sge_count, cmd.wqe_size);
2453         if (IS_ERR(wr))
2454                 return PTR_ERR(wr);
2455
2456         srq = idr_read_srq(cmd.srq_handle, file->ucontext);
2457         if (!srq)
2458                 goto out;
2459
2460         resp.bad_wr = 0;
2461         ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
2462
2463         put_srq_read(srq);
2464
2465         if (ret)
2466                 for (next = wr; next; next = next->next) {
2467                         ++resp.bad_wr;
2468                         if (next == bad_wr)
2469                                 break;
2470                 }
2471
2472         if (copy_to_user((void __user *) (unsigned long) cmd.response,
2473                          &resp, sizeof resp))
2474                 ret = -EFAULT;
2475
2476 out:
2477         while (wr) {
2478                 next = wr->next;
2479                 kfree(wr);
2480                 wr = next;
2481         }
2482
2483         return ret ? ret : in_len;
2484 }
2485
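/*
 * Create an address handle from the user-supplied attributes.  The
 * vlan_id and dmac fields are cleared before calling ib_create_ah().
 */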
2486 ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
2487                             const char __user *buf, int in_len,
2488                             int out_len)
2489 {
2490         struct ib_uverbs_create_ah       cmd;
2491         struct ib_uverbs_create_ah_resp  resp;
2492         struct ib_uobject               *uobj;
2493         struct ib_pd                    *pd;
2494         struct ib_ah                    *ah;
2495         struct ib_ah_attr               attr;
2496         int ret;
2497
2498         if (out_len < sizeof resp)
2499                 return -ENOSPC;
2500
2501         if (copy_from_user(&cmd, buf, sizeof cmd))
2502                 return -EFAULT;
2503
2504         uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
2505         if (!uobj)
2506                 return -ENOMEM;
2507
2508         init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
2509         down_write(&uobj->mutex);
2510
2511         pd = idr_read_pd(cmd.pd_handle, file->ucontext);
2512         if (!pd) {
2513                 ret = -EINVAL;
2514                 goto err;
2515         }
2516
2517         attr.dlid              = cmd.attr.dlid;
2518         attr.sl                = cmd.attr.sl;
2519         attr.src_path_bits     = cmd.attr.src_path_bits;
2520         attr.static_rate       = cmd.attr.static_rate;
2521         attr.ah_flags          = cmd.attr.is_global ? IB_AH_GRH : 0;
2522         attr.port_num          = cmd.attr.port_num;
2523         attr.grh.flow_label    = cmd.attr.grh.flow_label;
2524         attr.grh.sgid_index    = cmd.attr.grh.sgid_index;
2525         attr.grh.hop_limit     = cmd.attr.grh.hop_limit;
2526         attr.grh.traffic_class = cmd.attr.grh.traffic_class;
2527         attr.vlan_id           = 0;
2528         memset(&attr.dmac, 0, sizeof(attr.dmac));
2529         memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);
2530
2531         ah = ib_create_ah(pd, &attr);
2532         if (IS_ERR(ah)) {
2533                 ret = PTR_ERR(ah);
2534                 goto err_put;
2535         }
2536
2537         ah->uobject  = uobj;
2538         uobj->object = ah;
2539
2540         ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
2541         if (ret)
2542                 goto err_destroy;
2543
2544         resp.ah_handle = uobj->id;
2545
2546         if (copy_to_user((void __user *) (unsigned long) cmd.response,
2547                          &resp, sizeof resp)) {
2548                 ret = -EFAULT;
2549                 goto err_copy;
2550         }
2551
2552         put_pd_read(pd);
2553
2554         mutex_lock(&file->mutex);
2555         list_add_tail(&uobj->list, &file->ucontext->ah_list);
2556         mutex_unlock(&file->mutex);
2557
2558         uobj->live = 1;
2559
2560         up_write(&uobj->mutex);
2561
2562         return in_len;
2563
2564 err_copy:
2565         idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
2566
2567 err_destroy:
2568         ib_destroy_ah(ah);
2569
2570 err_put:
2571         put_pd_read(pd);
2572
2573 err:
2574         put_uobj_write(uobj);
2575         return ret;
2576 }
2577
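/* Destroy an address handle and release its uobject. */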
2578 ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
2579                              const char __user *buf, int in_len, int out_len)
2580 {
2581         struct ib_uverbs_destroy_ah cmd;
2582         struct ib_ah               *ah;
2583         struct ib_uobject          *uobj;
2584         int                         ret;
2585
2586         if (copy_from_user(&cmd, buf, sizeof cmd))
2587                 return -EFAULT;
2588
2589         uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
2590         if (!uobj)
2591                 return -EINVAL;
2592         ah = uobj->object;
2593
2594         ret = ib_destroy_ah(ah);
2595         if (!ret)
2596                 uobj->live = 0;
2597
2598         put_uobj_write(uobj);
2599
2600         if (ret)
2601                 return ret;
2602
2603         idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
2604
2605         mutex_lock(&file->mutex);
2606         list_del(&uobj->list);
2607         mutex_unlock(&file->mutex);
2608
2609         put_uobj(uobj);
2610
2611         return in_len;
2612 }
2613
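/*
 * Attach a QP to a multicast group.  The (gid, mlid) pair is also kept
 * on the uobject's mcast_list so it can be detached when the context is
 * torn down; attaching a group that is already on the list simply
 * succeeds.
 */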
2614 ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
2615                                const char __user *buf, int in_len,
2616                                int out_len)
2617 {
2618         struct ib_uverbs_attach_mcast cmd;
2619         struct ib_qp                 *qp;
2620         struct ib_uqp_object         *obj;
2621         struct ib_uverbs_mcast_entry *mcast;
2622         int                           ret;
2623
2624         if (copy_from_user(&cmd, buf, sizeof cmd))
2625                 return -EFAULT;
2626
2627         qp = idr_write_qp(cmd.qp_handle, file->ucontext);
2628         if (!qp)
2629                 return -EINVAL;
2630
2631         obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
2632
2633         list_for_each_entry(mcast, &obj->mcast_list, list)
2634                 if (cmd.mlid == mcast->lid &&
2635                     !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2636                         ret = 0;
2637                         goto out_put;
2638                 }
2639
2640         mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
2641         if (!mcast) {
2642                 ret = -ENOMEM;
2643                 goto out_put;
2644         }
2645
2646         mcast->lid = cmd.mlid;
2647         memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);
2648
2649         ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
2650         if (!ret)
2651                 list_add_tail(&mcast->list, &obj->mcast_list);
2652         else
2653                 kfree(mcast);
2654
2655 out_put:
2656         put_qp_write(qp);
2657
2658         return ret ? ret : in_len;
2659 }
2660
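/*
 * Detach a QP from a multicast group and drop the matching entry from
 * the uobject's mcast_list.
 */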
2661 ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
2662                                const char __user *buf, int in_len,
2663                                int out_len)
2664 {
2665         struct ib_uverbs_detach_mcast cmd;
2666         struct ib_uqp_object         *obj;
2667         struct ib_qp                 *qp;
2668         struct ib_uverbs_mcast_entry *mcast;
2669         int                           ret = -EINVAL;
2670
2671         if (copy_from_user(&cmd, buf, sizeof cmd))
2672                 return -EFAULT;
2673
2674         qp = idr_write_qp(cmd.qp_handle, file->ucontext);
2675         if (!qp)
2676                 return -EINVAL;
2677
2678         ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
2679         if (ret)
2680                 goto out_put;
2681
2682         obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
2683
2684         list_for_each_entry(mcast, &obj->mcast_list, list)
2685                 if (cmd.mlid == mcast->lid &&
2686                     !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2687                         list_del(&mcast->list);
2688                         kfree(mcast);
2689                         break;
2690                 }
2691
2692 out_put:
2693         put_qp_write(qp);
2694
2695         return ret ? ret : in_len;
2696 }
2697
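/*
 * Translate one user-space flow spec into the kernel's ib_flow_spec
 * representation, checking the per-type size field on the way.
 */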
2698 static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
2699                                 union ib_flow_spec *ib_spec)
2700 {
2701         if (kern_spec->reserved)
2702                 return -EINVAL;
2703
2704         ib_spec->type = kern_spec->type;
2705
2706         switch (ib_spec->type) {
2707         case IB_FLOW_SPEC_ETH:
2708                 ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
2709                 if (ib_spec->eth.size != kern_spec->eth.size)
2710                         return -EINVAL;
2711                 memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
2712                        sizeof(struct ib_flow_eth_filter));
2713                 memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
2714                        sizeof(struct ib_flow_eth_filter));
2715                 break;
2716         case IB_FLOW_SPEC_IPV4:
2717                 ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
2718                 if (ib_spec->ipv4.size != kern_spec->ipv4.size)
2719                         return -EINVAL;
2720                 memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
2721                        sizeof(struct ib_flow_ipv4_filter));
2722                 memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
2723                        sizeof(struct ib_flow_ipv4_filter));
2724                 break;
2725         case IB_FLOW_SPEC_TCP:
2726         case IB_FLOW_SPEC_UDP:
2727                 ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
2728                 if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
2729                         return -EINVAL;
2730                 memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
2731                        sizeof(struct ib_flow_tcp_udp_filter));
2732                 memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
2733                        sizeof(struct ib_flow_tcp_udp_filter));
2734                 break;
2735         default:
2736                 return -EINVAL;
2737         }
2738         return 0;
2739 }
2740
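/*
 * Extended command: create a flow steering rule.  The variable-length
 * list of flow specs that follows the command is copied in, each spec
 * is converted with kern_spec_to_ib_spec(), and the resulting
 * ib_flow_attr is attached to the QP with ib_create_flow().
 */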
2741 int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
2742                              struct ib_udata *ucore,
2743                              struct ib_udata *uhw)
2744 {
2745         struct ib_uverbs_create_flow      cmd;
2746         struct ib_uverbs_create_flow_resp resp;
2747         struct ib_uobject                 *uobj;
2748         struct ib_flow                    *flow_id;
2749         struct ib_uverbs_flow_attr        *kern_flow_attr;
2750         struct ib_flow_attr               *flow_attr;
2751         struct ib_qp                      *qp;
2752         int err = 0;
2753         void *kern_spec;
2754         void *ib_spec;
2755         int i;
2756
2757         if (ucore->inlen < sizeof(cmd))
2758                 return -EINVAL;
2759
2760         if (ucore->outlen < sizeof(resp))
2761                 return -ENOSPC;
2762
2763         err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
2764         if (err)
2765                 return err;
2766
2767         ucore->inbuf += sizeof(cmd);
2768         ucore->inlen -= sizeof(cmd);
2769
2770         if (cmd.comp_mask)
2771                 return -EINVAL;
2772
2773         if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER &&
2774              !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
2775                 return -EPERM;
2776
2777         if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
2778                 return -EINVAL;
2779
2780         if (cmd.flow_attr.size > ucore->inlen ||
2781             cmd.flow_attr.size >
2782             (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
2783                 return -EINVAL;
2784
2785         if (cmd.flow_attr.reserved[0] ||
2786             cmd.flow_attr.reserved[1])
2787                 return -EINVAL;
2788
2789         if (cmd.flow_attr.num_of_specs) {
2790                 kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
2791                                          GFP_KERNEL);
2792                 if (!kern_flow_attr)
2793                         return -ENOMEM;
2794
2795                 memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
2796                 err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
2797                                          cmd.flow_attr.size);
2798                 if (err)
2799                         goto err_free_attr;
2800         } else {
2801                 kern_flow_attr = &cmd.flow_attr;
2802         }
2803
2804         uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
2805         if (!uobj) {
2806                 err = -ENOMEM;
2807                 goto err_free_attr;
2808         }
2809         init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
2810         down_write(&uobj->mutex);
2811
2812         qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2813         if (!qp) {
2814                 err = -EINVAL;
2815                 goto err_uobj;
2816         }
2817
2818         flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
2819         if (!flow_attr) {
2820                 err = -ENOMEM;
2821                 goto err_put;
2822         }
2823
2824         flow_attr->type = kern_flow_attr->type;
2825         flow_attr->priority = kern_flow_attr->priority;
2826         flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
2827         flow_attr->port = kern_flow_attr->port;
2828         flow_attr->flags = kern_flow_attr->flags;
2829         flow_attr->size = sizeof(*flow_attr);
2830
2831         kern_spec = kern_flow_attr + 1;
2832         ib_spec = flow_attr + 1;
2833         for (i = 0; i < flow_attr->num_of_specs &&
2834              cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
2835              cmd.flow_attr.size >=
2836              ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
2837                 err = kern_spec_to_ib_spec(kern_spec, ib_spec);
2838                 if (err)
2839                         goto err_free;
2840                 flow_attr->size +=
2841                         ((union ib_flow_spec *) ib_spec)->size;
2842                 cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
2843                 kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
2844                 ib_spec += ((union ib_flow_spec *) ib_spec)->size;
2845         }
2846         if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
2847                 pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
2848                         i, cmd.flow_attr.size);
2849                 err = -EINVAL;
2850                 goto err_free;
2851         }
2852         flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
2853         if (IS_ERR(flow_id)) {
2854                 err = PTR_ERR(flow_id);
2855                 goto err_free;
2856         }
2857         flow_id->qp = qp;
2858         flow_id->uobject = uobj;
2859         uobj->object = flow_id;
2860
2861         err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
2862         if (err)
2863                 goto destroy_flow;
2864
2865         memset(&resp, 0, sizeof(resp));
2866         resp.flow_handle = uobj->id;
2867
2868         err = ib_copy_to_udata(ucore,
2869                                &resp, sizeof(resp));
2870         if (err)
2871                 goto err_copy;
2872
2873         put_qp_read(qp);
2874         mutex_lock(&file->mutex);
2875         list_add_tail(&uobj->list, &file->ucontext->rule_list);
2876         mutex_unlock(&file->mutex);
2877
2878         uobj->live = 1;
2879
2880         up_write(&uobj->mutex);
2881         kfree(flow_attr);
2882         if (cmd.flow_attr.num_of_specs)
2883                 kfree(kern_flow_attr);
2884         return 0;
2885 err_copy:
2886         idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
2887 destroy_flow:
2888         ib_destroy_flow(flow_id);
2889 err_free:
2890         kfree(flow_attr);
2891 err_put:
2892         put_qp_read(qp);
2893 err_uobj:
2894         put_uobj_write(uobj);
2895 err_free_attr:
2896         if (cmd.flow_attr.num_of_specs)
2897                 kfree(kern_flow_attr);
2898         return err;
2899 }
2900
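/* Extended command: destroy a flow steering rule by handle. */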
2901 int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
2902                               struct ib_udata *ucore,
2903                               struct ib_udata *uhw)
2904 {
2905         struct ib_uverbs_destroy_flow   cmd;
2906         struct ib_flow                  *flow_id;
2907         struct ib_uobject               *uobj;
2908         int                             ret;
2909
2910         if (ucore->inlen < sizeof(cmd))
2911                 return -EINVAL;
2912
2913         ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
2914         if (ret)
2915                 return ret;
2916
2917         if (cmd.comp_mask)
2918                 return -EINVAL;
2919
2920         uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
2921                               file->ucontext);
2922         if (!uobj)
2923                 return -EINVAL;
2924         flow_id = uobj->object;
2925
2926         ret = ib_destroy_flow(flow_id);
2927         if (!ret)
2928                 uobj->live = 0;
2929
2930         put_uobj_write(uobj);
2931
2932         idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
2933
2934         mutex_lock(&file->mutex);
2935         list_del(&uobj->list);
2936         mutex_unlock(&file->mutex);
2937
2938         put_uobj(uobj);
2939
2940         return ret;
2941 }
2942
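/*
 * Common SRQ creation path shared by ib_uverbs_create_srq() and
 * ib_uverbs_create_xsrq().  XRC SRQs additionally resolve an XRCD and a
 * completion queue before the SRQ is created on the PD.
 */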
2943 static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
2944                                 struct ib_uverbs_create_xsrq *cmd,
2945                                 struct ib_udata *udata)
2946 {
2947         struct ib_uverbs_create_srq_resp resp;
2948         struct ib_usrq_object           *obj;
2949         struct ib_pd                    *pd;
2950         struct ib_srq                   *srq;
2951         struct ib_uobject               *uninitialized_var(xrcd_uobj);
2952         struct ib_srq_init_attr          attr;
2953         int ret;
2954
2955         obj = kmalloc(sizeof *obj, GFP_KERNEL);
2956         if (!obj)
2957                 return -ENOMEM;
2958
2959         init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
2960         down_write(&obj->uevent.uobject.mutex);
2961
2962         if (cmd->srq_type == IB_SRQT_XRC) {
2963                 attr.ext.xrc.xrcd  = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
2964                 if (!attr.ext.xrc.xrcd) {
2965                         ret = -EINVAL;
2966                         goto err;
2967                 }
2968
2969                 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
2970                 atomic_inc(&obj->uxrcd->refcnt);
2971
2972                 attr.ext.xrc.cq  = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
2973                 if (!attr.ext.xrc.cq) {
2974                         ret = -EINVAL;
2975                         goto err_put_xrcd;
2976                 }
2977         }
2978
2979         pd  = idr_read_pd(cmd->pd_handle, file->ucontext);
2980         if (!pd) {
2981                 ret = -EINVAL;
2982                 goto err_put_cq;
2983         }
2984
2985         attr.event_handler  = ib_uverbs_srq_event_handler;
2986         attr.srq_context    = file;
2987         attr.srq_type       = cmd->srq_type;
2988         attr.attr.max_wr    = cmd->max_wr;
2989         attr.attr.max_sge   = cmd->max_sge;
2990         attr.attr.srq_limit = cmd->srq_limit;
2991
2992         obj->uevent.events_reported = 0;
2993         INIT_LIST_HEAD(&obj->uevent.event_list);
2994
2995         srq = pd->device->create_srq(pd, &attr, udata);
2996         if (IS_ERR(srq)) {
2997                 ret = PTR_ERR(srq);
2998                 goto err_put;
2999         }
3000
3001         srq->device        = pd->device;
3002         srq->pd            = pd;
3003         srq->srq_type      = cmd->srq_type;
3004         srq->uobject       = &obj->uevent.uobject;
3005         srq->event_handler = attr.event_handler;
3006         srq->srq_context   = attr.srq_context;
3007
3008         if (cmd->srq_type == IB_SRQT_XRC) {
3009                 srq->ext.xrc.cq   = attr.ext.xrc.cq;
3010                 srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
3011                 atomic_inc(&attr.ext.xrc.cq->usecnt);
3012                 atomic_inc(&attr.ext.xrc.xrcd->usecnt);
3013         }
3014
3015         atomic_inc(&pd->usecnt);
3016         atomic_set(&srq->usecnt, 0);
3017
3018         obj->uevent.uobject.object = srq;
3019         ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
3020         if (ret)
3021                 goto err_destroy;
3022
3023         memset(&resp, 0, sizeof resp);
3024         resp.srq_handle = obj->uevent.uobject.id;
3025         resp.max_wr     = attr.attr.max_wr;
3026         resp.max_sge    = attr.attr.max_sge;
3027         if (cmd->srq_type == IB_SRQT_XRC)
3028                 resp.srqn = srq->ext.xrc.srq_num;
3029
3030         if (copy_to_user((void __user *) (unsigned long) cmd->response,
3031                          &resp, sizeof resp)) {
3032                 ret = -EFAULT;
3033                 goto err_copy;
3034         }
3035
3036         if (cmd->srq_type == IB_SRQT_XRC) {
3037                 put_uobj_read(xrcd_uobj);
3038                 put_cq_read(attr.ext.xrc.cq);
3039         }
3040         put_pd_read(pd);
3041
3042         mutex_lock(&file->mutex);
3043         list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
3044         mutex_unlock(&file->mutex);
3045
3046         obj->uevent.uobject.live = 1;
3047
3048         up_write(&obj->uevent.uobject.mutex);
3049
3050         return 0;
3051
3052 err_copy:
3053         idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
3054
3055 err_destroy:
3056         ib_destroy_srq(srq);
3057
3058 err_put:
3059         put_pd_read(pd);
3060
3061 err_put_cq:
3062         if (cmd->srq_type == IB_SRQT_XRC)
3063                 put_cq_read(attr.ext.xrc.cq);
3064
3065 err_put_xrcd:
3066         if (cmd->srq_type == IB_SRQT_XRC) {
3067                 atomic_dec(&obj->uxrcd->refcnt);
3068                 put_uobj_read(xrcd_uobj);
3069         }
3070
3071 err:
3072         put_uobj_write(&obj->uevent.uobject);
3073         return ret;
3074 }
3075
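/*
 * Legacy create SRQ command: build an IB_SRQT_BASIC ib_uverbs_create_xsrq
 * from the old-style command and reuse __uverbs_create_xsrq().
 */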
3076 ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
3077                              const char __user *buf, int in_len,
3078                              int out_len)
3079 {
3080         struct ib_uverbs_create_srq      cmd;
3081         struct ib_uverbs_create_xsrq     xcmd;
3082         struct ib_uverbs_create_srq_resp resp;
3083         struct ib_udata                  udata;
3084         int ret;
3085
3086         if (out_len < sizeof resp)
3087                 return -ENOSPC;
3088
3089         if (copy_from_user(&cmd, buf, sizeof cmd))
3090                 return -EFAULT;
3091
3092         xcmd.response    = cmd.response;
3093         xcmd.user_handle = cmd.user_handle;
3094         xcmd.srq_type    = IB_SRQT_BASIC;
3095         xcmd.pd_handle   = cmd.pd_handle;
3096         xcmd.max_wr      = cmd.max_wr;
3097         xcmd.max_sge     = cmd.max_sge;
3098         xcmd.srq_limit   = cmd.srq_limit;
3099
3100         INIT_UDATA(&udata, buf + sizeof cmd,
3101                    (unsigned long) cmd.response + sizeof resp,
3102                    in_len - sizeof cmd, out_len - sizeof resp);
3103
3104         ret = __uverbs_create_xsrq(file, &xcmd, &udata);
3105         if (ret)
3106                 return ret;
3107
3108         return in_len;
3109 }
3110
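/*
 * Handle the CREATE_XSRQ command: the extended request already carries
 * the SRQ type, so it is handed to __uverbs_create_xsrq() unchanged.
 */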
3111 ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
3112                               const char __user *buf, int in_len, int out_len)
3113 {
3114         struct ib_uverbs_create_xsrq     cmd;
3115         struct ib_uverbs_create_srq_resp resp;
3116         struct ib_udata                  udata;
3117         int ret;
3118
3119         if (out_len < sizeof resp)
3120                 return -ENOSPC;
3121
3122         if (copy_from_user(&cmd, buf, sizeof cmd))
3123                 return -EFAULT;
3124
3125         INIT_UDATA(&udata, buf + sizeof cmd,
3126                    (unsigned long) cmd.response + sizeof resp,
3127                    in_len - sizeof cmd, out_len - sizeof resp);
3128
3129         ret = __uverbs_create_xsrq(file, &cmd, &udata);
3130         if (ret)
3131                 return ret;
3132
3133         return in_len;
3134 }
3135
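/*
 * Handle the MODIFY_SRQ command: look up the SRQ by handle and pass the
 * requested max_wr/srq_limit changes to the driver's modify_srq method.
 */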
3136 ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
3137                              const char __user *buf, int in_len,
3138                              int out_len)
3139 {
3140         struct ib_uverbs_modify_srq cmd;
3141         struct ib_udata             udata;
3142         struct ib_srq              *srq;
3143         struct ib_srq_attr          attr;
3144         int                         ret;
3145
3146         if (copy_from_user(&cmd, buf, sizeof cmd))
3147                 return -EFAULT;
3148
3149         INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
3150                    out_len);
3151
3152         srq = idr_read_srq(cmd.srq_handle, file->ucontext);
3153         if (!srq)
3154                 return -EINVAL;
3155
3156         attr.max_wr    = cmd.max_wr;
3157         attr.srq_limit = cmd.srq_limit;
3158
3159         ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);
3160
3161         put_srq_read(srq);
3162
3163         return ret ? ret : in_len;
3164 }
3165
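/*
 * Handle the QUERY_SRQ command: query the current SRQ attributes and
 * copy max_wr, max_sge and srq_limit back to user space.
 */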
3166 ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
3167                             const char __user *buf,
3168                             int in_len, int out_len)
3169 {
3170         struct ib_uverbs_query_srq      cmd;
3171         struct ib_uverbs_query_srq_resp resp;
3172         struct ib_srq_attr              attr;
3173         struct ib_srq                   *srq;
3174         int                             ret;
3175
3176         if (out_len < sizeof resp)
3177                 return -ENOSPC;
3178
3179         if (copy_from_user(&cmd, buf, sizeof cmd))
3180                 return -EFAULT;
3181
3182         srq = idr_read_srq(cmd.srq_handle, file->ucontext);
3183         if (!srq)
3184                 return -EINVAL;
3185
3186         ret = ib_query_srq(srq, &attr);
3187
3188         put_srq_read(srq);
3189
3190         if (ret)
3191                 return ret;
3192
3193         memset(&resp, 0, sizeof resp);
3194
3195         resp.max_wr    = attr.max_wr;
3196         resp.max_sge   = attr.max_sge;
3197         resp.srq_limit = attr.srq_limit;
3198
3199         if (copy_to_user((void __user *) (unsigned long) cmd.response,
3200                          &resp, sizeof resp))
3201                 return -EFAULT;
3202
3203         return in_len;
3204 }
3205
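/*
 * Handle the DESTROY_SRQ command: destroy the SRQ, drop the XRC domain
 * reference for XRC SRQs, remove the uobject from the idr and report
 * the number of asynchronous events delivered for this SRQ.
 */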
3206 ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
3207                               const char __user *buf, int in_len,
3208                               int out_len)
3209 {
3210         struct ib_uverbs_destroy_srq      cmd;
3211         struct ib_uverbs_destroy_srq_resp resp;
3212         struct ib_uobject                *uobj;
3213         struct ib_srq                    *srq;
3214         struct ib_uevent_object          *obj;
3215         int                               ret = -EINVAL;
3216         struct ib_usrq_object            *us;
3217         enum ib_srq_type                  srq_type;
3218
3219         if (copy_from_user(&cmd, buf, sizeof cmd))
3220                 return -EFAULT;
3221
3222         uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
3223         if (!uobj)
3224                 return -EINVAL;
3225         srq = uobj->object;
3226         obj = container_of(uobj, struct ib_uevent_object, uobject);
3227         srq_type = srq->srq_type;
3228
3229         ret = ib_destroy_srq(srq);
3230         if (!ret)
3231                 uobj->live = 0;
3232
3233         put_uobj_write(uobj);
3234
3235         if (ret)
3236                 return ret;
3237
3238         if (srq_type == IB_SRQT_XRC) {
3239                 us = container_of(obj, struct ib_usrq_object, uevent);
3240                 atomic_dec(&us->uxrcd->refcnt);
3241         }
3242
3243         idr_remove_uobj(&ib_uverbs_srq_idr, uobj);
3244
3245         mutex_lock(&file->mutex);
3246         list_del(&uobj->list);
3247         mutex_unlock(&file->mutex);
3248
3249         ib_uverbs_release_uevent(file, obj);
3250
3251         memset(&resp, 0, sizeof resp);
3252         resp.events_reported = obj->events_reported;
3253
3254         put_uobj(uobj);
3255
3256         if (copy_to_user((void __user *) (unsigned long) cmd.response,
3257                          &resp, sizeof resp))
3258                 ret = -EFAULT;
3259
3260         return ret ? ret : in_len;
3261 }
3262
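/*
 * Handle the extended QUERY_DEVICE command: query the device attributes,
 * fill in the base response fields and clear comp_mask, since no
 * extended capability fields are reported yet.  The reserved field in
 * the request must be zero.
 */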
3263 int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
3264                               struct ib_udata *ucore,
3265                               struct ib_udata *uhw)
3266 {
3267         struct ib_uverbs_ex_query_device_resp resp;
3268         struct ib_uverbs_ex_query_device  cmd;
3269         struct ib_device_attr attr;
3270         struct ib_device *device;
3271         int err;
3272
3273         device = file->device->ib_dev;
3274         if (ucore->inlen < sizeof(cmd))
3275                 return -EINVAL;
3276
3277         err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3278         if (err)
3279                 return err;
3280
3281         if (cmd.reserved)
3282                 return -EINVAL;
3283
3284         err = device->query_device(device, &attr);
3285         if (err)
3286                 return err;
3287
3288         memset(&resp, 0, sizeof(resp));
3289         copy_query_dev_fields(file, &resp.base, &attr);
3290         resp.comp_mask = 0;
3291
3292         err = ib_copy_to_udata(ucore, &resp, sizeof(resp));
3293         if (err)
3294                 return err;
3295
3296         return 0;
3297 }