1/*
2 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
3 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
4 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
5 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses.  You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 *     Redistribution and use in source and binary forms, with or
14 *     without modification, are permitted provided that the following
15 *     conditions are met:
16 *
17 *      - Redistributions of source code must retain the above
18 *        copyright notice, this list of conditions and the following
19 *        disclaimer.
20 *
21 *      - Redistributions in binary form must reproduce the above
22 *        copyright notice, this list of conditions and the following
23 *        disclaimer in the documentation and/or other materials
24 *        provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36#include <linux/file.h>
37#include <linux/fs.h>
38#include <linux/slab.h>
39
40#include <asm/uaccess.h>
41
42#include "uverbs.h"
43
44static struct lock_class_key pd_lock_key;
45static struct lock_class_key mr_lock_key;
46static struct lock_class_key cq_lock_key;
47static struct lock_class_key qp_lock_key;
48static struct lock_class_key ah_lock_key;
49static struct lock_class_key srq_lock_key;
50static struct lock_class_key xrcd_lock_key;
51
52#define INIT_UDATA(udata, ibuf, obuf, ilen, olen)			\
53	do {								\
54		(udata)->inbuf  = (void __user *) (ibuf);		\
55		(udata)->outbuf = (void __user *) (obuf);		\
56		(udata)->inlen  = (ilen);				\
57		(udata)->outlen = (olen);				\
58	} while (0)
59
60/*
61 * The ib_uobject locking scheme is as follows:
62 *
63 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
64 *   needs to be held during all idr operations.  When an object is
65 *   looked up, a reference must be taken on the object's kref before
66 *   dropping this lock.
67 *
68 * - Each object also has an rwsem.  This rwsem must be held for
69 *   reading while an operation that uses the object is performed.
70 *   For example, while registering an MR, the associated PD's
71 *   uobject.mutex must be held for reading.  The rwsem must be held
72 *   for writing while initializing or destroying an object.
73 *
74 * - In addition, each object has a "live" flag.  If this flag is not
75 *   set, then lookups of the object will fail even if it is found in
76 *   the idr.  This handles a reader that blocks and does not acquire
77 *   the rwsem until after the object is destroyed.  The destroy
78 *   operation will set the live flag to 0 and then drop the rwsem;
79 *   this will allow the reader to acquire the rwsem, see that the
80 *   live flag is 0, and then drop the rwsem and its reference to
81 *   object.  The underlying storage will not be freed until the last
82 *   reference to the object is dropped.
83 */
84
85static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
86		      struct ib_ucontext *context, struct lock_class_key *key)
87{
88	uobj->user_handle = user_handle;
89	uobj->context     = context;
90	kref_init(&uobj->ref);
91	init_rwsem(&uobj->mutex);
92	lockdep_set_class(&uobj->mutex, key);
93	uobj->live        = 0;
94}
95
96static void release_uobj(struct kref *kref)
97{
98	kfree(container_of(kref, struct ib_uobject, ref));
99}
100
101static void put_uobj(struct ib_uobject *uobj)
102{
103	kref_put(&uobj->ref, release_uobj);
104}
105
106static void put_uobj_read(struct ib_uobject *uobj)
107{
108	up_read(&uobj->mutex);
109	put_uobj(uobj);
110}
111
112static void put_uobj_write(struct ib_uobject *uobj)
113{
114	up_write(&uobj->mutex);
115	put_uobj(uobj);
116}
117
118static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
119{
120	int ret;
121
122retry:
123	if (!idr_pre_get(idr, GFP_KERNEL))
124		return -ENOMEM;
125
126	spin_lock(&ib_uverbs_idr_lock);
127	ret = idr_get_new(idr, uobj, &uobj->id);
128	spin_unlock(&ib_uverbs_idr_lock);
129
130	if (ret == -EAGAIN)
131		goto retry;
132
133	return ret;
134}
135
136void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
137{
138	spin_lock(&ib_uverbs_idr_lock);
139	idr_remove(idr, uobj->id);
140	spin_unlock(&ib_uverbs_idr_lock);
141}
142
143static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
144					 struct ib_ucontext *context)
145{
146	struct ib_uobject *uobj;
147
148	spin_lock(&ib_uverbs_idr_lock);
149	uobj = idr_find(idr, id);
150	if (uobj) {
151		if (uobj->context == context)
152			kref_get(&uobj->ref);
153		else
154			uobj = NULL;
155	}
156	spin_unlock(&ib_uverbs_idr_lock);
157
158	return uobj;
159}
160
161static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
162					struct ib_ucontext *context, int nested)
163{
164	struct ib_uobject *uobj;
165
166	uobj = __idr_get_uobj(idr, id, context);
167	if (!uobj)
168		return NULL;
169
170	if (nested)
171		down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
172	else
173		down_read(&uobj->mutex);
174	if (!uobj->live) {
175		put_uobj_read(uobj);
176		return NULL;
177	}
178
179	return uobj;
180}
181
182static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
183					 struct ib_ucontext *context)
184{
185	struct ib_uobject *uobj;
186
187	uobj = __idr_get_uobj(idr, id, context);
188	if (!uobj)
189		return NULL;
190
191	down_write(&uobj->mutex);
192	if (!uobj->live) {
193		put_uobj_write(uobj);
194		return NULL;
195	}
196
197	return uobj;
198}
199
200static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
201			  int nested)
202{
203	struct ib_uobject *uobj;
204
205	uobj = idr_read_uobj(idr, id, context, nested);
206	return uobj ? uobj->object : NULL;
207}
208
209static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
210{
211	return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
212}
213
214static void put_pd_read(struct ib_pd *pd)
215{
216	put_uobj_read(pd->uobject);
217}
218
219static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
220{
221	return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
222}
223
224static void put_cq_read(struct ib_cq *cq)
225{
226	put_uobj_read(cq->uobject);
227}
228
229static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
230{
231	return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
232}
233
234static void put_ah_read(struct ib_ah *ah)
235{
236	put_uobj_read(ah->uobject);
237}
238
239static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
240{
241	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
242}
243
244static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
245{
246	struct ib_uobject *uobj;
247
248	uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
249	return uobj ? uobj->object : NULL;
250}
251
252static void put_qp_read(struct ib_qp *qp)
253{
254	put_uobj_read(qp->uobject);
255}
256
257static void put_qp_write(struct ib_qp *qp)
258{
259	put_uobj_write(qp->uobject);
260}
261
262static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
263{
264	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
265}
266
267static void put_srq_read(struct ib_srq *srq)
268{
269	put_uobj_read(srq->uobject);
270}
271
272static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
273				     struct ib_uobject **uobj)
274{
275	*uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
276	return *uobj ? (*uobj)->object : NULL;
277}
278
279static void put_xrcd_read(struct ib_uobject *uobj)
280{
281	put_uobj_read(uobj);
282}
283
284ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
285			      const char __user *buf,
286			      int in_len, int out_len)
287{
288	struct ib_uverbs_get_context      cmd;
289	struct ib_uverbs_get_context_resp resp;
290	struct ib_udata                   udata;
291	struct ib_device                 *ibdev = file->device->ib_dev;
292	struct ib_ucontext		 *ucontext;
293	struct file			 *filp;
294	int ret;
295
296	if (out_len < sizeof resp)
297		return -ENOSPC;
298
299	if (copy_from_user(&cmd, buf, sizeof cmd))
300		return -EFAULT;
301
302	mutex_lock(&file->mutex);
303
304	if (file->ucontext) {
305		ret = -EINVAL;
306		goto err;
307	}
308
309	INIT_UDATA(&udata, buf + sizeof cmd,
310		   (unsigned long) cmd.response + sizeof resp,
311		   in_len - sizeof cmd, out_len - sizeof resp);
312
313	ucontext = ibdev->alloc_ucontext(ibdev, &udata);
314	if (IS_ERR(ucontext)) {
315		ret = PTR_ERR(ucontext);
316		goto err;
317	}
318
319	ucontext->device = ibdev;
320	INIT_LIST_HEAD(&ucontext->pd_list);
321	INIT_LIST_HEAD(&ucontext->mr_list);
322	INIT_LIST_HEAD(&ucontext->mw_list);
323	INIT_LIST_HEAD(&ucontext->cq_list);
324	INIT_LIST_HEAD(&ucontext->qp_list);
325	INIT_LIST_HEAD(&ucontext->srq_list);
326	INIT_LIST_HEAD(&ucontext->ah_list);
327	INIT_LIST_HEAD(&ucontext->xrcd_list);
328	ucontext->closing = 0;
329
330	resp.num_comp_vectors = file->device->num_comp_vectors;
331
332	ret = get_unused_fd();
333	if (ret < 0)
334		goto err_free;
335	resp.async_fd = ret;
336
337	filp = ib_uverbs_alloc_event_file(file, 1);
338	if (IS_ERR(filp)) {
339		ret = PTR_ERR(filp);
340		goto err_fd;
341	}
342
343	if (copy_to_user((void __user *) (unsigned long) cmd.response,
344			 &resp, sizeof resp)) {
345		ret = -EFAULT;
346		goto err_file;
347	}
348
349	file->async_file = filp->private_data;
350
351	INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
352			      ib_uverbs_event_handler);
353	ret = ib_register_event_handler(&file->event_handler);
354	if (ret)
355		goto err_file;
356
357	kref_get(&file->async_file->ref);
358	kref_get(&file->ref);
359	file->ucontext = ucontext;
360
361	fd_install(resp.async_fd, filp);
362
363	mutex_unlock(&file->mutex);
364
365	return in_len;
366
367err_file:
368	fput(filp);
369
370err_fd:
371	put_unused_fd(resp.async_fd);
372
373err_free:
374	ibdev->dealloc_ucontext(ucontext);
375
376err:
377	mutex_unlock(&file->mutex);
378	return ret;
379}
380
381ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
382			       const char __user *buf,
383			       int in_len, int out_len)
384{
385	struct ib_uverbs_query_device      cmd;
386	struct ib_uverbs_query_device_resp resp;
387	struct ib_device_attr              attr;
388	int                                ret;
389
390	if (out_len < sizeof resp)
391		return -ENOSPC;
392
393	if (copy_from_user(&cmd, buf, sizeof cmd))
394		return -EFAULT;
395
396	ret = ib_query_device(file->device->ib_dev, &attr);
397	if (ret)
398		return ret;
399
400	memset(&resp, 0, sizeof resp);
401
402	resp.fw_ver 		       = attr.fw_ver;
403	resp.node_guid 		       = file->device->ib_dev->node_guid;
404	resp.sys_image_guid 	       = attr.sys_image_guid;
405	resp.max_mr_size 	       = attr.max_mr_size;
406	resp.page_size_cap 	       = attr.page_size_cap;
407	resp.vendor_id 		       = attr.vendor_id;
408	resp.vendor_part_id 	       = attr.vendor_part_id;
409	resp.hw_ver 		       = attr.hw_ver;
410	resp.max_qp 		       = attr.max_qp;
411	resp.max_qp_wr 		       = attr.max_qp_wr;
412	resp.device_cap_flags 	       = attr.device_cap_flags;
413	resp.max_sge 		       = attr.max_sge;
414	resp.max_sge_rd 	       = attr.max_sge_rd;
415	resp.max_cq 		       = attr.max_cq;
416	resp.max_cqe 		       = attr.max_cqe;
417	resp.max_mr 		       = attr.max_mr;
418	resp.max_pd 		       = attr.max_pd;
419	resp.max_qp_rd_atom 	       = attr.max_qp_rd_atom;
420	resp.max_ee_rd_atom 	       = attr.max_ee_rd_atom;
421	resp.max_res_rd_atom 	       = attr.max_res_rd_atom;
422	resp.max_qp_init_rd_atom       = attr.max_qp_init_rd_atom;
423	resp.max_ee_init_rd_atom       = attr.max_ee_init_rd_atom;
424	resp.atomic_cap 	       = attr.atomic_cap;
425	resp.max_ee 		       = attr.max_ee;
426	resp.max_rdd 		       = attr.max_rdd;
427	resp.max_mw 		       = attr.max_mw;
428	resp.max_raw_ipv6_qp 	       = attr.max_raw_ipv6_qp;
429	resp.max_raw_ethy_qp 	       = attr.max_raw_ethy_qp;
430	resp.max_mcast_grp 	       = attr.max_mcast_grp;
431	resp.max_mcast_qp_attach       = attr.max_mcast_qp_attach;
432	resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
433	resp.max_ah 		       = attr.max_ah;
434	resp.max_fmr 		       = attr.max_fmr;
435	resp.max_map_per_fmr 	       = attr.max_map_per_fmr;
436	resp.max_srq 		       = attr.max_srq;
437	resp.max_srq_wr 	       = attr.max_srq_wr;
438	resp.max_srq_sge 	       = attr.max_srq_sge;
439	resp.max_pkeys 		       = attr.max_pkeys;
440	resp.local_ca_ack_delay        = attr.local_ca_ack_delay;
441	resp.phys_port_cnt	       = file->device->ib_dev->phys_port_cnt;
442
443	if (copy_to_user((void __user *) (unsigned long) cmd.response,
444			 &resp, sizeof resp))
445		return -EFAULT;
446
447	return in_len;
448}
449
450ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
451			     const char __user *buf,
452			     int in_len, int out_len)
453{
454	struct ib_uverbs_query_port      cmd;
455	struct ib_uverbs_query_port_resp resp;
456	struct ib_port_attr              attr;
457	int                              ret;
458
459	if (out_len < sizeof resp)
460		return -ENOSPC;
461
462	if (copy_from_user(&cmd, buf, sizeof cmd))
463		return -EFAULT;
464
465	ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
466	if (ret)
467		return ret;
468
469	memset(&resp, 0, sizeof resp);
470
471	resp.state 	     = attr.state;
472	resp.max_mtu 	     = attr.max_mtu;
473	resp.active_mtu      = attr.active_mtu;
474	resp.gid_tbl_len     = attr.gid_tbl_len;
475	resp.port_cap_flags  = attr.port_cap_flags;
476	resp.max_msg_sz      = attr.max_msg_sz;
477	resp.bad_pkey_cntr   = attr.bad_pkey_cntr;
478	resp.qkey_viol_cntr  = attr.qkey_viol_cntr;
479	resp.pkey_tbl_len    = attr.pkey_tbl_len;
480	resp.lid 	     = attr.lid;
481	resp.sm_lid 	     = attr.sm_lid;
482	resp.lmc 	     = attr.lmc;
483	resp.max_vl_num      = attr.max_vl_num;
484	resp.sm_sl 	     = attr.sm_sl;
485	resp.subnet_timeout  = attr.subnet_timeout;
486	resp.init_type_reply = attr.init_type_reply;
487	resp.active_width    = attr.active_width;
488	resp.active_speed    = attr.active_speed;
489	resp.phys_state      = attr.phys_state;
490	resp.link_layer      = rdma_port_get_link_layer(file->device->ib_dev,
491							cmd.port_num);
492
493	if (copy_to_user((void __user *) (unsigned long) cmd.response,
494			 &resp, sizeof resp))
495		return -EFAULT;
496
497	return in_len;
498}
499
500ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
501			   const char __user *buf,
502			   int in_len, int out_len)
503{
504	struct ib_uverbs_alloc_pd      cmd;
505	struct ib_uverbs_alloc_pd_resp resp;
506	struct ib_udata                udata;
507	struct ib_uobject             *uobj;
508	struct ib_pd                  *pd;
509	int                            ret;
510
511	if (out_len < sizeof resp)
512		return -ENOSPC;
513
514	if (copy_from_user(&cmd, buf, sizeof cmd))
515		return -EFAULT;
516
517	INIT_UDATA(&udata, buf + sizeof cmd,
518		   (unsigned long) cmd.response + sizeof resp,
519		   in_len - sizeof cmd, out_len - sizeof resp);
520
521	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
522	if (!uobj)
523		return -ENOMEM;
524
525	init_uobj(uobj, 0, file->ucontext, &pd_lock_key);
526	down_write(&uobj->mutex);
527
528	pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
529					    file->ucontext, &udata);
530	if (IS_ERR(pd)) {
531		ret = PTR_ERR(pd);
532		goto err;
533	}
534
535	pd->device  = file->device->ib_dev;
536	pd->uobject = uobj;
537	atomic_set(&pd->usecnt, 0);
538
539	uobj->object = pd;
540	ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
541	if (ret)
542		goto err_idr;
543
544	memset(&resp, 0, sizeof resp);
545	resp.pd_handle = uobj->id;
546
547	if (copy_to_user((void __user *) (unsigned long) cmd.response,
548			 &resp, sizeof resp)) {
549		ret = -EFAULT;
550		goto err_copy;
551	}
552
553	mutex_lock(&file->mutex);
554	list_add_tail(&uobj->list, &file->ucontext->pd_list);
555	mutex_unlock(&file->mutex);
556
557	uobj->live = 1;
558
559	up_write(&uobj->mutex);
560
561	return in_len;
562
563err_copy:
564	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
565
566err_idr:
567	ib_dealloc_pd(pd);
568
569err:
570	put_uobj_write(uobj);
571	return ret;
572}
573
574ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
575			     const char __user *buf,
576			     int in_len, int out_len)
577{
578	struct ib_uverbs_dealloc_pd cmd;
579	struct ib_uobject          *uobj;
580	int                         ret;
581
582	if (copy_from_user(&cmd, buf, sizeof cmd))
583		return -EFAULT;
584
585	uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
586	if (!uobj)
587		return -EINVAL;
588
589	ret = ib_dealloc_pd(uobj->object);
590	if (!ret)
591		uobj->live = 0;
592
593	put_uobj_write(uobj);
594
595	if (ret)
596		return ret;
597
598	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
599
600	mutex_lock(&file->mutex);
601	list_del(&uobj->list);
602	mutex_unlock(&file->mutex);
603
604	put_uobj(uobj);
605
606	return in_len;
607}
608
609struct xrcd_table_entry {
610	struct rb_node  node;
611	struct ib_xrcd *xrcd;
612	struct inode   *inode;
613};
614
615static int xrcd_table_insert(struct ib_uverbs_device *dev,
616			    struct inode *inode,
617			    struct ib_xrcd *xrcd)
618{
619	struct xrcd_table_entry *entry, *scan;
620	struct rb_node **p = &dev->xrcd_tree.rb_node;
621	struct rb_node *parent = NULL;
622
623	entry = kmalloc(sizeof *entry, GFP_KERNEL);
624	if (!entry)
625		return -ENOMEM;
626
627	entry->xrcd  = xrcd;
628	entry->inode = inode;
629
630	while (*p) {
631		parent = *p;
632		scan = rb_entry(parent, struct xrcd_table_entry, node);
633
634		if (inode < scan->inode) {
635			p = &(*p)->rb_left;
636		} else if (inode > scan->inode) {
637			p = &(*p)->rb_right;
638		} else {
639			kfree(entry);
640			return -EEXIST;
641		}
642	}
643
644	rb_link_node(&entry->node, parent, p);
645	rb_insert_color(&entry->node, &dev->xrcd_tree);
646	igrab(inode);
647	return 0;
648}
649
650static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
651						  struct inode *inode)
652{
653	struct xrcd_table_entry *entry;
654	struct rb_node *p = dev->xrcd_tree.rb_node;
655
656	while (p) {
657		entry = rb_entry(p, struct xrcd_table_entry, node);
658
659		if (inode < entry->inode)
660			p = p->rb_left;
661		else if (inode > entry->inode)
662			p = p->rb_right;
663		else
664			return entry;
665	}
666
667	return NULL;
668}
669
670static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
671{
672	struct xrcd_table_entry *entry;
673
674	entry = xrcd_table_search(dev, inode);
675	if (!entry)
676		return NULL;
677
678	return entry->xrcd;
679}
680
681static void xrcd_table_delete(struct ib_uverbs_device *dev,
682			      struct inode *inode)
683{
684	struct xrcd_table_entry *entry;
685
686	entry = xrcd_table_search(dev, inode);
687	if (entry) {
688		iput(inode);
689		rb_erase(&entry->node, &dev->xrcd_tree);
690		kfree(entry);
691	}
692}
693
694ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
695			    const char __user *buf, int in_len,
696			    int out_len)
697{
698	struct ib_uverbs_open_xrcd	cmd;
699	struct ib_uverbs_open_xrcd_resp	resp;
700	struct ib_udata			udata;
701	struct ib_uxrcd_object         *obj;
702	struct ib_xrcd                 *xrcd = NULL;
703	struct file                    *f = NULL;
704	struct inode                   *inode = NULL;
705	int				ret = 0;
706	int				new_xrcd = 0;
707
708	if (out_len < sizeof resp)
709		return -ENOSPC;
710
711	if (copy_from_user(&cmd, buf, sizeof cmd))
712		return -EFAULT;
713
714	INIT_UDATA(&udata, buf + sizeof cmd,
715		   (unsigned long) cmd.response + sizeof resp,
716		   in_len - sizeof cmd, out_len - sizeof  resp);
717
718	mutex_lock(&file->device->xrcd_tree_mutex);
719
720	if (cmd.fd != -1) {
721		/* search for file descriptor */
722		f = fget(cmd.fd);
723		if (!f) {
724			ret = -EBADF;
725			goto err_tree_mutex_unlock;
726		}
727
728		inode = f->f_dentry->d_inode;
729		if (!inode) {
730			ret = -EBADF;
731			goto err_tree_mutex_unlock;
732		}
733
734		xrcd = find_xrcd(file->device, inode);
735		if (!xrcd && !(cmd.oflags & O_CREAT)) {
736			/* no file descriptor. Need CREATE flag */
737			ret = -EAGAIN;
738			goto err_tree_mutex_unlock;
739		}
740
741		if (xrcd && cmd.oflags & O_EXCL) {
742			ret = -EINVAL;
743			goto err_tree_mutex_unlock;
744		}
745	}
746
747	obj = kmalloc(sizeof *obj, GFP_KERNEL);
748	if (!obj) {
749		ret = -ENOMEM;
750		goto err_tree_mutex_unlock;
751	}
752
753	init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_key);
754
755	down_write(&obj->uobject.mutex);
756
757	if (!xrcd) {
758		xrcd = file->device->ib_dev->alloc_xrcd(file->device->ib_dev,
759							file->ucontext, &udata);
760		if (IS_ERR(xrcd)) {
761			ret = PTR_ERR(xrcd);
762			goto err;
763		}
764
765		xrcd->inode   = inode;
766		xrcd->device  = file->device->ib_dev;
767		atomic_set(&xrcd->usecnt, 0);
768		mutex_init(&xrcd->tgt_qp_mutex);
769		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
770		new_xrcd = 1;
771	}
772
773	atomic_set(&obj->refcnt, 0);
774	obj->uobject.object = xrcd;
775	ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
776	if (ret)
777		goto err_idr;
778
779	memset(&resp, 0, sizeof resp);
780	resp.xrcd_handle = obj->uobject.id;
781
782	if (inode) {
783		if (new_xrcd) {
784			/* create new inode/xrcd table entry */
785			ret = xrcd_table_insert(file->device, inode, xrcd);
786			if (ret)
787				goto err_insert_xrcd;
788		}
789		atomic_inc(&xrcd->usecnt);
790	}
791
792	if (copy_to_user((void __user *) (unsigned long) cmd.response,
793			 &resp, sizeof resp)) {
794		ret = -EFAULT;
795		goto err_copy;
796	}
797
798	if (f)
799		fput(f);
800
801	mutex_lock(&file->mutex);
802	list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
803	mutex_unlock(&file->mutex);
804
805	obj->uobject.live = 1;
806	up_write(&obj->uobject.mutex);
807
808	mutex_unlock(&file->device->xrcd_tree_mutex);
809	return in_len;
810
811err_copy:
812	if (inode) {
813		if (new_xrcd)
814			xrcd_table_delete(file->device, inode);
815		atomic_dec(&xrcd->usecnt);
816	}
817
818err_insert_xrcd:
819	idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
820
821err_idr:
822	ib_dealloc_xrcd(xrcd);
823
824err:
825	put_uobj_write(&obj->uobject);
826
827err_tree_mutex_unlock:
828	if (f)
829		fput(f);
830
831	mutex_unlock(&file->device->xrcd_tree_mutex);
832
833	return ret;
834}
835
836ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
837			     const char __user *buf, int in_len,
838			     int out_len)
839{
840	struct ib_uverbs_close_xrcd cmd;
841	struct ib_uobject           *uobj;
842	struct ib_xrcd              *xrcd = NULL;
843	struct inode                *inode = NULL;
844	struct ib_uxrcd_object      *obj;
845	int                         live;
846	int                         ret = 0;
847
848	if (copy_from_user(&cmd, buf, sizeof cmd))
849		return -EFAULT;
850
851	mutex_lock(&file->device->xrcd_tree_mutex);
852	uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
853	if (!uobj) {
854		ret = -EINVAL;
855		goto out;
856	}
857
858	xrcd  = uobj->object;
859	inode = xrcd->inode;
860	obj   = container_of(uobj, struct ib_uxrcd_object, uobject);
861	if (atomic_read(&obj->refcnt)) {
862		put_uobj_write(uobj);
863		ret = -EBUSY;
864		goto out;
865	}
866
867	if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
868		ret = ib_dealloc_xrcd(uobj->object);
869		if (!ret)
870			uobj->live = 0;
871	}
872
873	live = uobj->live;
874	if (inode && ret)
875		atomic_inc(&xrcd->usecnt);
876
877	put_uobj_write(uobj);
878
879	if (ret)
880		goto out;
881
882	if (inode && !live)
883		xrcd_table_delete(file->device, inode);
884
885	idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
886	mutex_lock(&file->mutex);
887	list_del(&uobj->list);
888	mutex_unlock(&file->mutex);
889
890	put_uobj(uobj);
891	ret = in_len;
892
893out:
894	mutex_unlock(&file->device->xrcd_tree_mutex);
895	return ret;
896}
897
898void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
899			    struct ib_xrcd *xrcd)
900{
901	struct inode *inode;
902
903	inode = xrcd->inode;
904	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
905		return;
906
907	ib_dealloc_xrcd(xrcd);
908
909	if (inode)
910		xrcd_table_delete(dev, inode);
911}
912
913ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
914			 const char __user *buf, int in_len,
915			 int out_len)
916{
917	struct ib_uverbs_reg_mr      cmd;
918	struct ib_uverbs_reg_mr_resp resp;
919	struct ib_udata              udata;
920	struct ib_uobject           *uobj;
921	struct ib_pd                *pd;
922	struct ib_mr                *mr;
923	int                          ret;
924
925	if (out_len < sizeof resp)
926		return -ENOSPC;
927
928	if (copy_from_user(&cmd, buf, sizeof cmd))
929		return -EFAULT;
930
931	INIT_UDATA(&udata, buf + sizeof cmd,
932		   (unsigned long) cmd.response + sizeof resp,
933		   in_len - sizeof cmd, out_len - sizeof resp);
934
935	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
936		return -EINVAL;
937
938	/*
939	 * Local write permission is required if remote write or
940	 * remote atomic permission is also requested.
941	 */
942	if (cmd.access_flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
943	    !(cmd.access_flags & IB_ACCESS_LOCAL_WRITE))
944		return -EINVAL;
945
946	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
947	if (!uobj)
948		return -ENOMEM;
949
950	init_uobj(uobj, 0, file->ucontext, &mr_lock_key);
951	down_write(&uobj->mutex);
952
953	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
954	if (!pd) {
955		ret = -EINVAL;
956		goto err_free;
957	}
958
959	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
960				     cmd.access_flags, &udata);
961	if (IS_ERR(mr)) {
962		ret = PTR_ERR(mr);
963		goto err_put;
964	}
965
966	mr->device  = pd->device;
967	mr->pd      = pd;
968	mr->uobject = uobj;
969	atomic_inc(&pd->usecnt);
970	atomic_set(&mr->usecnt, 0);
971
972	uobj->object = mr;
973	ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
974	if (ret)
975		goto err_unreg;
976
977	memset(&resp, 0, sizeof resp);
978	resp.lkey      = mr->lkey;
979	resp.rkey      = mr->rkey;
980	resp.mr_handle = uobj->id;
981
982	if (copy_to_user((void __user *) (unsigned long) cmd.response,
983			 &resp, sizeof resp)) {
984		ret = -EFAULT;
985		goto err_copy;
986	}
987
988	put_pd_read(pd);
989
990	mutex_lock(&file->mutex);
991	list_add_tail(&uobj->list, &file->ucontext->mr_list);
992	mutex_unlock(&file->mutex);
993
994	uobj->live = 1;
995
996	up_write(&uobj->mutex);
997
998	return in_len;
999
1000err_copy:
1001	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
1002
1003err_unreg:
1004	ib_dereg_mr(mr);
1005
1006err_put:
1007	put_pd_read(pd);
1008
1009err_free:
1010	put_uobj_write(uobj);
1011	return ret;
1012}
1013
1014ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
1015			   const char __user *buf, int in_len,
1016			   int out_len)
1017{
1018	struct ib_uverbs_dereg_mr cmd;
1019	struct ib_mr             *mr;
1020	struct ib_uobject	 *uobj;
1021	int                       ret = -EINVAL;
1022
1023	if (copy_from_user(&cmd, buf, sizeof cmd))
1024		return -EFAULT;
1025
1026	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
1027	if (!uobj)
1028		return -EINVAL;
1029
1030	mr = uobj->object;
1031
1032	ret = ib_dereg_mr(mr);
1033	if (!ret)
1034		uobj->live = 0;
1035
1036	put_uobj_write(uobj);
1037
1038	if (ret)
1039		return ret;
1040
1041	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
1042
1043	mutex_lock(&file->mutex);
1044	list_del(&uobj->list);
1045	mutex_unlock(&file->mutex);
1046
1047	put_uobj(uobj);
1048
1049	return in_len;
1050}
1051
1052ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
1053				      const char __user *buf, int in_len,
1054				      int out_len)
1055{
1056	struct ib_uverbs_create_comp_channel	   cmd;
1057	struct ib_uverbs_create_comp_channel_resp  resp;
1058	struct file				  *filp;
1059	int ret;
1060
1061	if (out_len < sizeof resp)
1062		return -ENOSPC;
1063
1064	if (copy_from_user(&cmd, buf, sizeof cmd))
1065		return -EFAULT;
1066
1067	ret = get_unused_fd();
1068	if (ret < 0)
1069		return ret;
1070	resp.fd = ret;
1071
1072	filp = ib_uverbs_alloc_event_file(file, 0);
1073	if (IS_ERR(filp)) {
1074		put_unused_fd(resp.fd);
1075		return PTR_ERR(filp);
1076	}
1077
1078	if (copy_to_user((void __user *) (unsigned long) cmd.response,
1079			 &resp, sizeof resp)) {
1080		put_unused_fd(resp.fd);
1081		fput(filp);
1082		return -EFAULT;
1083	}
1084
1085	fd_install(resp.fd, filp);
1086	return in_len;
1087}
1088
1089ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
1090			    const char __user *buf, int in_len,
1091			    int out_len)
1092{
1093	struct ib_uverbs_create_cq      cmd;
1094	struct ib_uverbs_create_cq_resp resp;
1095	struct ib_udata                 udata;
1096	struct ib_ucq_object           *obj;
1097	struct ib_uverbs_event_file    *ev_file = NULL;
1098	struct ib_cq                   *cq;
1099	int                             ret;
1100
1101	if (out_len < sizeof resp)
1102		return -ENOSPC;
1103
1104	if (copy_from_user(&cmd, buf, sizeof cmd))
1105		return -EFAULT;
1106
1107	INIT_UDATA(&udata, buf + sizeof cmd,
1108		   (unsigned long) cmd.response + sizeof resp,
1109		   in_len - sizeof cmd, out_len - sizeof resp);
1110
1111	if (cmd.comp_vector >= file->device->num_comp_vectors)
1112		return -EINVAL;
1113
1114	obj = kmalloc(sizeof *obj, GFP_KERNEL);
1115	if (!obj)
1116		return -ENOMEM;
1117
1118	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_key);
1119	down_write(&obj->uobject.mutex);
1120
1121	if (cmd.comp_channel >= 0) {
1122		ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
1123		if (!ev_file) {
1124			ret = -EINVAL;
1125			goto err;
1126		}
1127	}
1128
1129	obj->uverbs_file	   = file;
1130	obj->comp_events_reported  = 0;
1131	obj->async_events_reported = 0;
1132	INIT_LIST_HEAD(&obj->comp_list);
1133	INIT_LIST_HEAD(&obj->async_list);
1134
1135	cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
1136					     cmd.comp_vector,
1137					     file->ucontext, &udata);
1138	if (IS_ERR(cq)) {
1139		ret = PTR_ERR(cq);
1140		goto err_file;
1141	}
1142
1143	cq->device        = file->device->ib_dev;
1144	cq->uobject       = &obj->uobject;
1145	cq->comp_handler  = ib_uverbs_comp_handler;
1146	cq->event_handler = ib_uverbs_cq_event_handler;
1147	cq->cq_context    = ev_file;
1148	atomic_set(&cq->usecnt, 0);
1149
1150	obj->uobject.object = cq;
1151	ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
1152	if (ret)
1153		goto err_free;
1154
1155	memset(&resp, 0, sizeof resp);
1156	resp.cq_handle = obj->uobject.id;
1157	resp.cqe       = cq->cqe;
1158
1159	if (copy_to_user((void __user *) (unsigned long) cmd.response,
1160			 &resp, sizeof resp)) {
1161		ret = -EFAULT;
1162		goto err_copy;
1163	}
1164
1165	mutex_lock(&file->mutex);
1166	list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
1167	mutex_unlock(&file->mutex);
1168
1169	obj->uobject.live = 1;
1170
1171	up_write(&obj->uobject.mutex);
1172
1173	return in_len;
1174
1175err_copy:
1176	idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);
1177
1178err_free:
1179	ib_destroy_cq(cq);
1180
1181err_file:
1182	if (ev_file)
1183		ib_uverbs_release_ucq(file, ev_file, obj);
1184
1185err:
1186	put_uobj_write(&obj->uobject);
1187	return ret;
1188}
1189
1190ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
1191			    const char __user *buf, int in_len,
1192			    int out_len)
1193{
1194	struct ib_uverbs_resize_cq	cmd;
1195	struct ib_uverbs_resize_cq_resp	resp;
1196	struct ib_udata                 udata;
1197	struct ib_cq			*cq;
1198	int				ret = -EINVAL;
1199
1200	if (copy_from_user(&cmd, buf, sizeof cmd))
1201		return -EFAULT;
1202
1203	INIT_UDATA(&udata, buf + sizeof cmd,
1204		   (unsigned long) cmd.response + sizeof resp,
1205		   in_len - sizeof cmd, out_len - sizeof resp);
1206
1207	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
1208	if (!cq)
1209		return -EINVAL;
1210
1211	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
1212	if (ret)
1213		goto out;
1214
1215	resp.cqe = cq->cqe;
1216
1217	if (copy_to_user((void __user *) (unsigned long) cmd.response,
1218			 &resp, sizeof resp.cqe))
1219		ret = -EFAULT;
1220
1221out:
1222	put_cq_read(cq);
1223
1224	return ret ? ret : in_len;
1225}
1226
1227static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
1228{
1229	struct ib_uverbs_wc tmp;
1230
1231	tmp.wr_id		= wc->wr_id;
1232	tmp.status		= wc->status;
1233	tmp.opcode		= wc->opcode;
1234	tmp.vendor_err		= wc->vendor_err;
1235	tmp.byte_len		= wc->byte_len;
1236	tmp.ex.imm_data		= (__u32 __force) wc->ex.imm_data;
1237	tmp.qp_num		= wc->qp->qp_num;
1238	tmp.src_qp		= wc->src_qp;
1239	tmp.wc_flags		= wc->wc_flags;
1240	tmp.pkey_index		= wc->pkey_index;
1241	tmp.slid		= wc->slid;
1242	tmp.sl			= wc->sl;
1243	tmp.dlid_path_bits	= wc->dlid_path_bits;
1244	tmp.port_num		= wc->port_num;
1245	tmp.reserved		= 0;
1246
1247	if (copy_to_user(dest, &tmp, sizeof tmp))
1248		return -EFAULT;
1249
1250	return 0;
1251}
1252
1253ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
1254			  const char __user *buf, int in_len,
1255			  int out_len)
1256{
1257	struct ib_uverbs_poll_cq       cmd;
1258	struct ib_uverbs_poll_cq_resp  resp;
1259	u8 __user                     *header_ptr;
1260	u8 __user                     *data_ptr;
1261	struct ib_cq                  *cq;
1262	struct ib_wc                   wc;
1263	int                            ret;
1264
1265	if (copy_from_user(&cmd, buf, sizeof cmd))
1266		return -EFAULT;
1267
1268	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
1269	if (!cq)
1270		return -EINVAL;
1271
1272	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
1273	header_ptr = (void __user *)(unsigned long) cmd.response;
1274	data_ptr = header_ptr + sizeof resp;
1275
1276	memset(&resp, 0, sizeof resp);
1277	while (resp.count < cmd.ne) {
1278		ret = ib_poll_cq(cq, 1, &wc);
1279		if (ret < 0)
1280			goto out_put;
1281		if (!ret)
1282			break;
1283
1284		ret = copy_wc_to_user(data_ptr, &wc);
1285		if (ret)
1286			goto out_put;
1287
1288		data_ptr += sizeof(struct ib_uverbs_wc);
1289		++resp.count;
1290	}
1291
1292	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
1293		ret = -EFAULT;
1294		goto out_put;
1295	}
1296
1297	ret = in_len;
1298
1299out_put:
1300	put_cq_read(cq);
1301	return ret;
1302}
1303
1304ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
1305				const char __user *buf, int in_len,
1306				int out_len)
1307{
1308	struct ib_uverbs_req_notify_cq cmd;
1309	struct ib_cq                  *cq;
1310
1311	if (copy_from_user(&cmd, buf, sizeof cmd))
1312		return -EFAULT;
1313
1314	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
1315	if (!cq)
1316		return -EINVAL;
1317
1318	ib_req_notify_cq(cq, cmd.solicited_only ?
1319			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
1320
1321	put_cq_read(cq);
1322
1323	return in_len;
1324}
1325
1326ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
1327			     const char __user *buf, int in_len,
1328			     int out_len)
1329{
1330	struct ib_uverbs_destroy_cq      cmd;
1331	struct ib_uverbs_destroy_cq_resp resp;
1332	struct ib_uobject		*uobj;
1333	struct ib_cq               	*cq;
1334	struct ib_ucq_object        	*obj;
1335	struct ib_uverbs_event_file	*ev_file;
1336	int                        	 ret = -EINVAL;
1337
1338	if (copy_from_user(&cmd, buf, sizeof cmd))
1339		return -EFAULT;
1340
1341	uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
1342	if (!uobj)
1343		return -EINVAL;
1344	cq      = uobj->object;
1345	ev_file = cq->cq_context;
1346	obj     = container_of(cq->uobject, struct ib_ucq_object, uobject);
1347
1348	ret = ib_destroy_cq(cq);
1349	if (!ret)
1350		uobj->live = 0;
1351
1352	put_uobj_write(uobj);
1353
1354	if (ret)
1355		return ret;
1356
1357	idr_remove_uobj(&ib_uverbs_cq_idr, uobj);
1358
1359	mutex_lock(&file->mutex);
1360	list_del(&uobj->list);
1361	mutex_unlock(&file->mutex);
1362
1363	ib_uverbs_release_ucq(file, ev_file, obj);
1364
1365	memset(&resp, 0, sizeof resp);
1366	resp.comp_events_reported  = obj->comp_events_reported;
1367	resp.async_events_reported = obj->async_events_reported;
1368
1369	put_uobj(uobj);
1370
1371	if (copy_to_user((void __user *) (unsigned long) cmd.response,
1372			 &resp, sizeof resp))
1373		return -EFAULT;
1374
1375	return in_len;
1376}
1377
1378ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
1379			    const char __user *buf, int in_len,
1380			    int out_len)
1381{
1382	struct ib_uverbs_create_qp      cmd;
1383	struct ib_uverbs_create_qp_resp resp;
1384	struct ib_udata                 udata;
1385	struct ib_uqp_object           *obj;
1386	struct ib_device	       *device;
1387	struct ib_pd                   *pd = NULL;
1388	struct ib_xrcd		       *xrcd = NULL;
1389	struct ib_uobject	       *uninitialized_var(xrcd_uobj);
1390	struct ib_cq                   *scq = NULL, *rcq = NULL;
1391	struct ib_srq                  *srq = NULL;
1392	struct ib_qp                   *qp;
1393	struct ib_qp_init_attr          attr;
1394	int ret;
1395
1396	if (out_len < sizeof resp)
1397		return -ENOSPC;
1398
1399	if (copy_from_user(&cmd, buf, sizeof cmd))
1400		return -EFAULT;
1401
1402	INIT_UDATA(&udata, buf + sizeof cmd,
1403		   (unsigned long) cmd.response + sizeof resp,
1404		   in_len - sizeof cmd, out_len - sizeof resp);
1405
1406	obj = kmalloc(sizeof *obj, GFP_KERNEL);
1407	if (!obj)
1408		return -ENOMEM;
1409
1410	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key);
1411	down_write(&obj->uevent.uobject.mutex);
1412
1413	if (cmd.qp_type == IB_QPT_XRC_TGT) {
1414		xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
1415		if (!xrcd) {
1416			ret = -EINVAL;
1417			goto err_put;
1418		}
1419		device = xrcd->device;
1420	} else {
1421		pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
1422		scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, 0);
1423		if (!pd || !scq) {
1424			ret = -EINVAL;
1425			goto err_put;
1426		}
1427
1428		if (cmd.qp_type == IB_QPT_XRC_INI) {
1429			cmd.max_recv_wr = cmd.max_recv_sge = 0;
1430		} else {
1431			if (cmd.is_srq) {
1432				srq = idr_read_srq(cmd.srq_handle, file->ucontext);
1433				if (!srq || srq->srq_type != IB_SRQT_BASIC) {
1434					ret = -EINVAL;
1435					goto err_put;
1436				}
1437			}
1438			rcq = (cmd.recv_cq_handle == cmd.send_cq_handle) ?
1439			       scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext, 1);
1440			if (!rcq) {
1441				ret = -EINVAL;
1442				goto err_put;
1443			}
1444		}
1445		device = pd->device;
1446	}
1447
1448	attr.event_handler = ib_uverbs_qp_event_handler;
1449	attr.qp_context    = file;
1450	attr.send_cq       = scq;
1451	attr.recv_cq       = rcq;
1452	attr.srq           = srq;
1453	attr.xrcd	   = xrcd;
1454	attr.sq_sig_type   = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
1455	attr.qp_type       = cmd.qp_type;
1456	attr.create_flags  = 0;
1457
1458	attr.cap.max_send_wr     = cmd.max_send_wr;
1459	attr.cap.max_recv_wr     = cmd.max_recv_wr;
1460	attr.cap.max_send_sge    = cmd.max_send_sge;
1461	attr.cap.max_recv_sge    = cmd.max_recv_sge;
1462	attr.cap.max_inline_data = cmd.max_inline_data;
1463
1464	obj->uevent.events_reported     = 0;
1465	INIT_LIST_HEAD(&obj->uevent.event_list);
1466	INIT_LIST_HEAD(&obj->mcast_list);
1467
1468	if (cmd.qp_type == IB_QPT_XRC_TGT)
1469		qp = ib_create_qp(pd, &attr);
1470	else
1471		qp = device->create_qp(pd, &attr, &udata);
1472
1473	if (IS_ERR(qp)) {
1474		ret = PTR_ERR(qp);
1475		goto err_put;
1476	}
1477
1478	if (cmd.qp_type != IB_QPT_XRC_TGT) {
1479		qp->real_qp	  = qp;
1480		qp->device	  = device;
1481		qp->pd		  = pd;
1482		qp->send_cq	  = attr.send_cq;
1483		qp->recv_cq	  = attr.recv_cq;
1484		qp->srq		  = attr.srq;
1485		qp->event_handler = attr.event_handler;
1486		qp->qp_context	  = attr.qp_context;
1487		qp->qp_type	  = attr.qp_type;
1488		atomic_set(&qp->usecnt, 0);
1489		atomic_inc(&pd->usecnt);
1490		atomic_inc(&attr.send_cq->usecnt);
1491		if (attr.recv_cq)
1492			atomic_inc(&attr.recv_cq->usecnt);
1493		if (attr.srq)
1494			atomic_inc(&attr.srq->usecnt);
1495	}
1496	qp->uobject = &obj->uevent.uobject;
1497
1498	obj->uevent.uobject.object = qp;
1499	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1500	if (ret)
1501		goto err_destroy;
1502
1503	memset(&resp, 0, sizeof resp);
1504	resp.qpn             = qp->qp_num;
1505	resp.qp_handle       = obj->uevent.uobject.id;
1506	resp.max_recv_sge    = attr.cap.max_recv_sge;
1507	resp.max_send_sge    = attr.cap.max_send_sge;
1508	resp.max_recv_wr     = attr.cap.max_recv_wr;
1509	resp.max_send_wr     = attr.cap.max_send_wr;
1510	resp.max_inline_data = attr.cap.max_inline_data;
1511
1512	if (copy_to_user((void __user *) (unsigned long) cmd.response,
1513			 &resp, sizeof resp)) {
1514		ret = -EFAULT;
1515		goto err_copy;
1516	}
1517
1518	if (xrcd)
1519		put_xrcd_read(xrcd_uobj);
1520	if (pd)
1521		put_pd_read(pd);
1522	if (scq)
1523		put_cq_read(scq);
1524	if (rcq && rcq != scq)
1525		put_cq_read(rcq);
1526	if (srq)
1527		put_srq_read(srq);
1528
1529	mutex_lock(&file->mutex);
1530	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
1531	mutex_unlock(&file->mutex);
1532
1533	obj->uevent.uobject.live = 1;
1534
1535	up_write(&obj->uevent.uobject.mutex);
1536
1537	return in_len;
1538
1539err_copy:
1540	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1541
1542err_destroy:
1543	ib_destroy_qp(qp);
1544
1545err_put:
1546	if (xrcd)
1547		put_xrcd_read(xrcd_uobj);
1548	if (pd)
1549		put_pd_read(pd);
1550	if (scq)
1551		put_cq_read(scq);
1552	if (rcq && rcq != scq)
1553		put_cq_read(rcq);
1554	if (srq)
1555		put_srq_read(srq);
1556
1557	put_uobj_write(&obj->uevent.uobject);
1558	return ret;
1559}
1560
1561ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
1562			  const char __user *buf, int in_len, int out_len)
1563{
1564	struct ib_uverbs_open_qp        cmd;
1565	struct ib_uverbs_create_qp_resp resp;
1566	struct ib_udata                 udata;
1567	struct ib_uqp_object           *obj;
1568	struct ib_xrcd		       *xrcd;
1569	struct ib_uobject	       *uninitialized_var(xrcd_uobj);
1570	struct ib_qp                   *qp;
1571	struct ib_qp_open_attr          attr;
1572	int ret;
1573
1574	if (out_len < sizeof resp)
1575		return -ENOSPC;
1576
1577	if (copy_from_user(&cmd, buf, sizeof cmd))
1578		return -EFAULT;
1579
1580	INIT_UDATA(&udata, buf + sizeof cmd,
1581		   (unsigned long) cmd.response + sizeof resp,
1582		   in_len - sizeof cmd, out_len - sizeof resp);
1583
1584	obj = kmalloc(sizeof *obj, GFP_KERNEL);
1585	if (!obj)
1586		return -ENOMEM;
1587
1588	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key);
1589	down_write(&obj->uevent.uobject.mutex);
1590
1591	xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
1592	if (!xrcd) {
1593		ret = -EINVAL;
1594		goto err_put;
1595	}
1596
1597	attr.event_handler = ib_uverbs_qp_event_handler;
1598	attr.qp_context    = file;
1599	attr.qp_num        = cmd.qpn;
1600	attr.qp_type       = cmd.qp_type;
1601
1602	obj->uevent.events_reported = 0;
1603	INIT_LIST_HEAD(&obj->uevent.event_list);
1604	INIT_LIST_HEAD(&obj->mcast_list);
1605
1606	qp = ib_open_qp(xrcd, &attr);
1607	if (IS_ERR(qp)) {
1608		ret = PTR_ERR(qp);
1609		goto err_put;
1610	}
1611
1612	qp->uobject = &obj->uevent.uobject;
1613
1614	obj->uevent.uobject.object = qp;
1615	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1616	if (ret)
1617		goto err_destroy;
1618
1619	memset(&resp, 0, sizeof resp);
1620	resp.qpn       = qp->qp_num;
1621	resp.qp_handle = obj->uevent.uobject.id;
1622
1623	if (copy_to_user((void __user *) (unsigned long) cmd.response,
1624			 &resp, sizeof resp)) {
1625		ret = -EFAULT;
1626		goto err_remove;
1627	}
1628
1629	put_xrcd_read(xrcd_uobj);
1630
1631	mutex_lock(&file->mutex);
1632	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
1633	mutex_unlock(&file->mutex);
1634
1635	obj->uevent.uobject.live = 1;
1636
1637	up_write(&obj->uevent.uobject.mutex);
1638
1639	return in_len;
1640
1641err_remove:
1642	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1643
1644err_destroy:
1645	ib_destroy_qp(qp);
1646
1647err_put:
1648	put_xrcd_read(xrcd_uobj);
1649	put_uobj_write(&obj->uevent.uobject);
1650	return ret;
1651}
1652
1653ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
1654			   const char __user *buf, int in_len,
1655			   int out_len)
1656{
1657	struct ib_uverbs_query_qp      cmd;
1658	struct ib_uverbs_query_qp_resp resp;
1659	struct ib_qp                   *qp;
1660	struct ib_qp_attr              *attr;
1661	struct ib_qp_init_attr         *init_attr;
1662	int                            ret;
1663
1664	if (copy_from_user(&cmd, buf, sizeof cmd))
1665		return -EFAULT;
1666
1667	attr      = kmalloc(sizeof *attr, GFP_KERNEL);
1668	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
1669	if (!attr || !init_attr) {
1670		ret = -ENOMEM;
1671		goto out;
1672	}
1673
1674	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
1675	if (!qp) {
1676		ret = -EINVAL;
1677		goto out;
1678	}
1679
1680	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);
1681
1682	put_qp_read(qp);
1683
1684	if (ret)
1685		goto out;
1686
1687	memset(&resp, 0, sizeof resp);
1688
1689	resp.qp_state               = attr->qp_state;
1690	resp.cur_qp_state           = attr->cur_qp_state;
1691	resp.path_mtu               = attr->path_mtu;
1692	resp.path_mig_state         = attr->path_mig_state;
1693	resp.qkey                   = attr->qkey;
1694	resp.rq_psn                 = attr->rq_psn;
1695	resp.sq_psn                 = attr->sq_psn;
1696	resp.dest_qp_num            = attr->dest_qp_num;
1697	resp.qp_access_flags        = attr->qp_access_flags;
1698	resp.pkey_index             = attr->pkey_index;
1699	resp.alt_pkey_index         = attr->alt_pkey_index;
1700	resp.sq_draining            = attr->sq_draining;
1701	resp.max_rd_atomic          = attr->max_rd_atomic;
1702	resp.max_dest_rd_atomic     = attr->max_dest_rd_atomic;
1703	resp.min_rnr_timer          = attr->min_rnr_timer;
1704	resp.port_num               = attr->port_num;
1705	resp.timeout                = attr->timeout;
1706	resp.retry_cnt              = attr->retry_cnt;
1707	resp.rnr_retry              = attr->rnr_retry;
1708	resp.alt_port_num           = attr->alt_port_num;
1709	resp.alt_timeout            = attr->alt_timeout;
1710
1711	memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
1712	resp.dest.flow_label        = attr->ah_attr.grh.flow_label;
1713	resp.dest.sgid_index        = attr->ah_attr.grh.sgid_index;
1714	resp.dest.hop_limit         = attr->ah_attr.grh.hop_limit;
1715	resp.dest.traffic_class     = attr->ah_attr.grh.traffic_class;
1716	resp.dest.dlid              = attr->ah_attr.dlid;
1717	resp.dest.sl                = attr->ah_attr.sl;
1718	resp.dest.src_path_bits     = attr->ah_attr.src_path_bits;
1719	resp.dest.static_rate       = attr->ah_attr.static_rate;
1720	resp.dest.is_global         = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
1721	resp.dest.port_num          = attr->ah_attr.port_num;
1722
1723	memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
1724	resp.alt_dest.flow_label    = attr->alt_ah_attr.grh.flow_label;
1725	resp.alt_dest.sgid_index    = attr->alt_ah_attr.grh.sgid_index;
1726	resp.alt_dest.hop_limit     = attr->alt_ah_attr.grh.hop_limit;
1727	resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
1728	resp.alt_dest.dlid          = attr->alt_ah_attr.dlid;
1729	resp.alt_dest.sl            = attr->alt_ah_attr.sl;
1730	resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
1731	resp.alt_dest.static_rate   = attr->alt_ah_attr.static_rate;
1732	resp.alt_dest.is_global     = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
1733	resp.alt_dest.port_num      = attr->alt_ah_attr.port_num;
1734
1735	resp.max_send_wr            = init_attr->cap.max_send_wr;
1736	resp.max_recv_wr            = init_attr->cap.max_recv_wr;
1737	resp.max_send_sge           = init_attr->cap.max_send_sge;
1738	resp.max_recv_sge           = init_attr->cap.max_recv_sge;
1739	resp.max_inline_data        = init_attr->cap.max_inline_data;
1740	resp.sq_sig_all             = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
1741
1742	if (copy_to_user((void __user *) (unsigned long) cmd.response,
1743			 &resp, sizeof resp))
1744		ret = -EFAULT;
1745
1746out:
1747	kfree(attr);
1748	kfree(init_attr);
1749
1750	return ret ? ret : in_len;
1751}
1752
1753/* Remove ignored fields set in the attribute mask */
1754static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
1755{
1756	switch (qp_type) {
1757	case IB_QPT_XRC_INI:
1758		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
1759	case IB_QPT_XRC_TGT:
1760		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
1761				IB_QP_RNR_RETRY);
1762	default:
1763		return mask;
1764	}
1765}
1766
1767ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
1768			    const char __user *buf, int in_len,
1769			    int out_len)
1770{
1771	struct ib_uverbs_modify_qp cmd;
1772	struct ib_udata            udata;
1773	struct ib_qp              *qp;
1774	struct ib_qp_attr         *attr;
1775	int                        ret;
1776
1777	if (copy_from_user(&cmd, buf, sizeof cmd))
1778		return -EFAULT;
1779
1780	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
1781		   out_len);
1782
1783	attr = kmalloc(sizeof *attr, GFP_KERNEL);
1784	if (!attr)
1785		return -ENOMEM;
1786
1787	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
1788	if (!qp) {
1789		ret = -EINVAL;
1790		goto out;
1791	}
1792
1793	attr->qp_state 		  = cmd.qp_state;
1794	attr->cur_qp_state 	  = cmd.cur_qp_state;
1795	attr->path_mtu 		  = cmd.path_mtu;
1796	attr->path_mig_state 	  = cmd.path_mig_state;
1797	attr->qkey 		  = cmd.qkey;
1798	attr->rq_psn 		  = cmd.rq_psn;
1799	attr->sq_psn 		  = cmd.sq_psn;
1800	attr->dest_qp_num 	  = cmd.dest_qp_num;
1801	attr->qp_access_flags 	  = cmd.qp_access_flags;
1802	attr->pkey_index 	  = cmd.pkey_index;
1803	attr->alt_pkey_index 	  = cmd.alt_pkey_index;
1804	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
1805	attr->max_rd_atomic 	  = cmd.max_rd_atomic;
1806	attr->max_dest_rd_atomic  = cmd.max_dest_rd_atomic;
1807	attr->min_rnr_timer 	  = cmd.min_rnr_timer;
1808	attr->port_num 		  = cmd.port_num;
1809	attr->timeout 		  = cmd.timeout;
1810	attr->retry_cnt 	  = cmd.retry_cnt;
1811	attr->rnr_retry 	  = cmd.rnr_retry;
1812	attr->alt_port_num 	  = cmd.alt_port_num;
1813	attr->alt_timeout 	  = cmd.alt_timeout;
1814
1815	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
1816	attr->ah_attr.grh.flow_label        = cmd.dest.flow_label;
1817	attr->ah_attr.grh.sgid_index        = cmd.dest.sgid_index;
1818	attr->ah_attr.grh.hop_limit         = cmd.dest.hop_limit;
1819	attr->ah_attr.grh.traffic_class     = cmd.dest.traffic_class;
1820	attr->ah_attr.dlid 	    	    = cmd.dest.dlid;
1821	attr->ah_attr.sl   	    	    = cmd.dest.sl;
1822	attr->ah_attr.src_path_bits 	    = cmd.dest.src_path_bits;
1823	attr->ah_attr.static_rate   	    = cmd.dest.static_rate;
1824	attr->ah_attr.ah_flags 	    	    = cmd.dest.is_global ? IB_AH_GRH : 0;
1825	attr->ah_attr.port_num 	    	    = cmd.dest.port_num;
1826
1827	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
1828	attr->alt_ah_attr.grh.flow_label    = cmd.alt_dest.flow_label;
1829	attr->alt_ah_attr.grh.sgid_index    = cmd.alt_dest.sgid_index;
1830	attr->alt_ah_attr.grh.hop_limit     = cmd.alt_dest.hop_limit;
1831	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
1832	attr->alt_ah_attr.dlid 	    	    = cmd.alt_dest.dlid;
1833	attr->alt_ah_attr.sl   	    	    = cmd.alt_dest.sl;
1834	attr->alt_ah_attr.src_path_bits     = cmd.alt_dest.src_path_bits;
1835	attr->alt_ah_attr.static_rate       = cmd.alt_dest.static_rate;
1836	attr->alt_ah_attr.ah_flags 	    = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
1837	attr->alt_ah_attr.port_num 	    = cmd.alt_dest.port_num;
1838
1839	if (qp->real_qp == qp) {
1840		ret = qp->device->modify_qp(qp, attr,
1841			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
1842	} else {
1843		ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
1844	}
1845
1846	put_qp_read(qp);
1847
1848	if (ret)
1849		goto out;
1850
1851	ret = in_len;
1852
1853out:
1854	kfree(attr);
1855
1856	return ret;
1857}
1858
1859ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
1860			     const char __user *buf, int in_len,
1861			     int out_len)
1862{
1863	struct ib_uverbs_destroy_qp      cmd;
1864	struct ib_uverbs_destroy_qp_resp resp;
1865	struct ib_uobject		*uobj;
1866	struct ib_qp               	*qp;
1867	struct ib_uqp_object        	*obj;
1868	int                        	 ret = -EINVAL;
1869
1870	if (copy_from_user(&cmd, buf, sizeof cmd))
1871		return -EFAULT;
1872
1873	memset(&resp, 0, sizeof resp);
1874
1875	uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
1876	if (!uobj)
1877		return -EINVAL;
1878	qp  = uobj->object;
1879	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
1880
1881	if (!list_empty(&obj->mcast_list)) {
1882		put_uobj_write(uobj);
1883		return -EBUSY;
1884	}
1885
1886	ret = ib_destroy_qp(qp);
1887	if (!ret)
1888		uobj->live = 0;
1889
1890	put_uobj_write(uobj);
1891
1892	if (ret)
1893		return ret;
1894
1895	idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
1896
1897	mutex_lock(&file->mutex);
1898	list_del(&uobj->list);
1899	mutex_unlock(&file->mutex);
1900
1901	ib_uverbs_release_uevent(file, &obj->uevent);
1902
1903	resp.events_reported = obj->uevent.events_reported;
1904
1905	put_uobj(uobj);
1906
1907	if (copy_to_user((void __user *) (unsigned long) cmd.response,
1908			 &resp, sizeof resp))
1909		return -EFAULT;
1910
1911	return in_len;
1912}
1913
1914ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
1915			    const char __user *buf, int in_len,
1916			    int out_len)
1917{
1918	struct ib_uverbs_post_send      cmd;
1919	struct ib_uverbs_post_send_resp resp;
1920	struct ib_uverbs_send_wr       *user_wr;
1921	struct ib_send_wr              *wr = NULL, *last, *next, *bad_wr;
1922	struct ib_qp                   *qp;
1923	int                             i, sg_ind;
1924	int				is_ud;
1925	ssize_t                         ret = -EINVAL;
1926
1927	if (copy_from_user(&cmd, buf, sizeof cmd))
1928		return -EFAULT;
1929
1930	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
1931	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
1932		return -EINVAL;
1933
1934	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
1935		return -EINVAL;
1936
1937	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
1938	if (!user_wr)
1939		return -ENOMEM;
1940
1941	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
1942	if (!qp)
1943		goto out;
1944
1945	is_ud = qp->qp_type == IB_QPT_UD;
1946	sg_ind = 0;
1947	last = NULL;
1948	for (i = 0; i < cmd.wr_count; ++i) {
1949		if (copy_from_user(user_wr,
1950				   buf + sizeof cmd + i * cmd.wqe_size,
1951				   cmd.wqe_size)) {
1952			ret = -EFAULT;
1953			goto out_put;
1954		}
1955
1956		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
1957			ret = -EINVAL;
1958			goto out_put;
1959		}
1960
1961		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
1962			       user_wr->num_sge * sizeof (struct ib_sge),
1963			       GFP_KERNEL);
1964		if (!next) {
1965			ret = -ENOMEM;
1966			goto out_put;
1967		}
1968
1969		if (!last)
1970			wr = next;
1971		else
1972			last->next = next;
1973		last = next;
1974
1975		next->next       = NULL;
1976		next->wr_id      = user_wr->wr_id;
1977		next->num_sge    = user_wr->num_sge;
1978		next->opcode     = user_wr->opcode;
1979		next->send_flags = user_wr->send_flags;
1980
1981		if (is_ud) {
1982			next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
1983						     file->ucontext);
1984			if (!next->wr.ud.ah) {
1985				ret = -EINVAL;
1986				goto out_put;
1987			}
1988			next->wr.ud.remote_qpn  = user_wr->wr.ud.remote_qpn;
1989			next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
1990		} else {
1991			switch (next->opcode) {
1992			case IB_WR_RDMA_WRITE_WITH_IMM:
1993				next->ex.imm_data =
1994					(__be32 __force) user_wr->ex.imm_data;
1995			case IB_WR_RDMA_WRITE:
1996			case IB_WR_RDMA_READ:
1997				next->wr.rdma.remote_addr =
1998					user_wr->wr.rdma.remote_addr;
1999				next->wr.rdma.rkey        =
2000					user_wr->wr.rdma.rkey;
2001				break;
2002			case IB_WR_SEND_WITH_IMM:
2003				next->ex.imm_data =
2004					(__be32 __force) user_wr->ex.imm_data;
2005				break;
2006			case IB_WR_SEND_WITH_INV:
2007				next->ex.invalidate_rkey =
2008					user_wr->ex.invalidate_rkey;
2009				break;
2010			case IB_WR_ATOMIC_CMP_AND_SWP:
2011			case IB_WR_ATOMIC_FETCH_AND_ADD:
2012				next->wr.atomic.remote_addr =
2013					user_wr->wr.atomic.remote_addr;
2014				next->wr.atomic.compare_add =
2015					user_wr->wr.atomic.compare_add;
2016				next->wr.atomic.swap = user_wr->wr.atomic.swap;
2017				next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
2018				break;
2019			default:
2020				break;
2021			}
2022		}
2023
2024		if (next->num_sge) {
2025			next->sg_list = (void *) next +
2026				ALIGN(sizeof *next, sizeof (struct ib_sge));
2027			if (copy_from_user(next->sg_list,
2028					   buf + sizeof cmd +
2029					   cmd.wr_count * cmd.wqe_size +
2030					   sg_ind * sizeof (struct ib_sge),
2031					   next->num_sge * sizeof (struct ib_sge))) {
2032				ret = -EFAULT;
2033				goto out_put;
2034			}
2035			sg_ind += next->num_sge;
2036		} else
2037			next->sg_list = NULL;
2038	}
2039
2040	resp.bad_wr = 0;
2041	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
2042	if (ret)
2043		for (next = wr; next; next = next->next) {
2044			++resp.bad_wr;
2045			if (next == bad_wr)
2046				break;
2047		}
2048
2049	if (copy_to_user((void __user *) (unsigned long) cmd.response,
2050			 &resp, sizeof resp))
2051		ret = -EFAULT;
2052
2053out_put:
2054	put_qp_read(qp);
2055
2056	while (wr) {
2057		if (is_ud && wr->wr.ud.ah)
2058			put_ah_read(wr->wr.ud.ah);
2059		next = wr->next;
2060		kfree(wr);
2061		wr = next;
2062	}
2063
2064out:
2065	kfree(user_wr);
2066
2067	return ret ? ret : in_len;
2068}
2069
2070static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
2071						    int in_len,
2072						    u32 wr_count,
2073						    u32 sge_count,
2074						    u32 wqe_size)
2075{
2076	struct ib_uverbs_recv_wr *user_wr;
2077	struct ib_recv_wr        *wr = NULL, *last, *next;
2078	int                       sg_ind;
2079	int                       i;
2080	int                       ret;
2081
2082	if (in_len < wqe_size * wr_count +
2083	    sge_count * sizeof (struct ib_uverbs_sge))
2084		return ERR_PTR(-EINVAL);
2085
2086	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
2087		return ERR_PTR(-EINVAL);
2088
2089	user_wr = kmalloc(wqe_size, GFP_KERNEL);
2090	if (!user_wr)
2091		return ERR_PTR(-ENOMEM);
2092
2093	sg_ind = 0;
2094	last = NULL;
2095	for (i = 0; i < wr_count; ++i) {
2096		if (copy_from_user(user_wr, buf + i * wqe_size,
2097				   wqe_size)) {
2098			ret = -EFAULT;
2099			goto err;
2100		}
2101
2102		if (user_wr->num_sge + sg_ind > sge_count) {
2103			ret = -EINVAL;
2104			goto err;
2105		}
2106
2107		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
2108			       user_wr->num_sge * sizeof (struct ib_sge),
2109			       GFP_KERNEL);
2110		if (!next) {
2111			ret = -ENOMEM;
2112			goto err;
2113		}
2114
2115		if (!last)
2116			wr = next;
2117		else
2118			last->next = next;
2119		last = next;
2120
2121		next->next       = NULL;
2122		next->wr_id      = user_wr->wr_id;
2123		next->num_sge    = user_wr->num_sge;
2124
2125		if (next->num_sge) {
2126			next->sg_list = (void *) next +
2127				ALIGN(sizeof *next, sizeof (struct ib_sge));
2128			if (copy_from_user(next->sg_list,
2129					   buf + wr_count * wqe_size +
2130					   sg_ind * sizeof (struct ib_sge),
2131					   next->num_sge * sizeof (struct ib_sge))) {
2132				ret = -EFAULT;
2133				goto err;
2134			}
2135			sg_ind += next->num_sge;
2136		} else
2137			next->sg_list = NULL;
2138	}
2139
2140	kfree(user_wr);
2141	return wr;
2142
2143err:
2144	kfree(user_wr);
2145
2146	while (wr) {
2147		next = wr->next;
2148		kfree(wr);
2149		wr = next;
2150	}
2151
2152	return ERR_PTR(ret);
2153}
2154
2155ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
2156			    const char __user *buf, int in_len,
2157			    int out_len)
2158{
2159	struct ib_uverbs_post_recv      cmd;
2160	struct ib_uverbs_post_recv_resp resp;
2161	struct ib_recv_wr              *wr, *next, *bad_wr;
2162	struct ib_qp                   *qp;
2163	ssize_t                         ret = -EINVAL;
2164
2165	if (copy_from_user(&cmd, buf, sizeof cmd))
2166		return -EFAULT;
2167
2168	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2169				       in_len - sizeof cmd, cmd.wr_count,
2170				       cmd.sge_count, cmd.wqe_size);
2171	if (IS_ERR(wr))
2172		return PTR_ERR(wr);
2173
2174	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2175	if (!qp)
2176		goto out;
2177
2178	resp.bad_wr = 0;
2179	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);
2180
2181	put_qp_read(qp);
2182
2183	if (ret)
2184		for (next = wr; next; next = next->next) {
2185			++resp.bad_wr;
2186			if (next == bad_wr)
2187				break;
2188		}
2189
2190	if (copy_to_user((void __user *) (unsigned long) cmd.response,
2191			 &resp, sizeof resp))
2192		ret = -EFAULT;
2193
2194out:
2195	while (wr) {
2196		next = wr->next;
2197		kfree(wr);
2198		wr = next;
2199	}
2200
2201	return ret ? ret : in_len;
2202}
2203
2204ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
2205				const char __user *buf, int in_len,
2206				int out_len)
2207{
2208	struct ib_uverbs_post_srq_recv      cmd;
2209	struct ib_uverbs_post_srq_recv_resp resp;
2210	struct ib_recv_wr                  *wr, *next, *bad_wr;
2211	struct ib_srq                      *srq;
2212	ssize_t                             ret = -EINVAL;
2213
2214	if (copy_from_user(&cmd, buf, sizeof cmd))
2215		return -EFAULT;
2216
2217	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2218				       in_len - sizeof cmd, cmd.wr_count,
2219				       cmd.sge_count, cmd.wqe_size);
2220	if (IS_ERR(wr))
2221		return PTR_ERR(wr);
2222
2223	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
2224	if (!srq)
2225		goto out;
2226
2227	resp.bad_wr = 0;
2228	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
2229
2230	put_srq_read(srq);
2231
2232	if (ret)
2233		for (next = wr; next; next = next->next) {
2234			++resp.bad_wr;
2235			if (next == bad_wr)
2236				break;
2237		}
2238
2239	if (copy_to_user((void __user *) (unsigned long) cmd.response,
2240			 &resp, sizeof resp))
2241		ret = -EFAULT;
2242
2243out:
2244	while (wr) {
2245		next = wr->next;
2246		kfree(wr);
2247		wr = next;
2248	}
2249
2250	return ret ? ret : in_len;
2251}
2252
2253ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
2254			    const char __user *buf, int in_len,
2255			    int out_len)
2256{
2257	struct ib_uverbs_create_ah	 cmd;
2258	struct ib_uverbs_create_ah_resp	 resp;
2259	struct ib_uobject		*uobj;
2260	struct ib_pd			*pd;
2261	struct ib_ah			*ah;
2262	struct ib_ah_attr		attr;
2263	int ret;
2264
2265	if (out_len < sizeof resp)
2266		return -ENOSPC;
2267
2268	if (copy_from_user(&cmd, buf, sizeof cmd))
2269		return -EFAULT;
2270
2271	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
2272	if (!uobj)
2273		return -ENOMEM;
2274
2275	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_key);
2276	down_write(&uobj->mutex);
2277
2278	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
2279	if (!pd) {
2280		ret = -EINVAL;
2281		goto err;
2282	}
2283
2284	attr.dlid 	       = cmd.attr.dlid;
2285	attr.sl 	       = cmd.attr.sl;
2286	attr.src_path_bits     = cmd.attr.src_path_bits;
2287	attr.static_rate       = cmd.attr.static_rate;
2288	attr.ah_flags          = cmd.attr.is_global ? IB_AH_GRH : 0;
2289	attr.port_num 	       = cmd.attr.port_num;
2290	attr.grh.flow_label    = cmd.attr.grh.flow_label;
2291	attr.grh.sgid_index    = cmd.attr.grh.sgid_index;
2292	attr.grh.hop_limit     = cmd.attr.grh.hop_limit;
2293	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
2294	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);
2295
2296	ah = ib_create_ah(pd, &attr);
2297	if (IS_ERR(ah)) {
2298		ret = PTR_ERR(ah);
2299		goto err_put;
2300	}
2301
2302	ah->uobject  = uobj;
2303	uobj->object = ah;
2304
2305	ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
2306	if (ret)
2307		goto err_destroy;
2308
2309	resp.ah_handle = uobj->id;
2310
2311	if (copy_to_user((void __user *) (unsigned long) cmd.response,
2312			 &resp, sizeof resp)) {
2313		ret = -EFAULT;
2314		goto err_copy;
2315	}
2316
2317	put_pd_read(pd);
2318
2319	mutex_lock(&file->mutex);
2320	list_add_tail(&uobj->list, &file->ucontext->ah_list);
2321	mutex_unlock(&file->mutex);
2322
2323	uobj->live = 1;
2324
2325	up_write(&uobj->mutex);
2326
2327	return in_len;
2328
2329err_copy:
2330	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
2331
2332err_destroy:
2333	ib_destroy_ah(ah);
2334
2335err_put:
2336	put_pd_read(pd);
2337
2338err:
2339	put_uobj_write(uobj);
2340	return ret;
2341}
2342
2343ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
2344			     const char __user *buf, int in_len, int out_len)
2345{
2346	struct ib_uverbs_destroy_ah cmd;
2347	struct ib_ah		   *ah;
2348	struct ib_uobject	   *uobj;
2349	int			    ret;
2350
2351	if (copy_from_user(&cmd, buf, sizeof cmd))
2352		return -EFAULT;
2353
2354	uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
2355	if (!uobj)
2356		return -EINVAL;
2357	ah = uobj->object;
2358
2359	ret = ib_destroy_ah(ah);
2360	if (!ret)
2361		uobj->live = 0;
2362
2363	put_uobj_write(uobj);
2364
2365	if (ret)
2366		return ret;
2367
2368	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
2369
2370	mutex_lock(&file->mutex);
2371	list_del(&uobj->list);
2372	mutex_unlock(&file->mutex);
2373
2374	put_uobj(uobj);
2375
2376	return in_len;
2377}
2378
2379ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
2380			       const char __user *buf, int in_len,
2381			       int out_len)
2382{
2383	struct ib_uverbs_attach_mcast cmd;
2384	struct ib_qp                 *qp;
2385	struct ib_uqp_object         *obj;
2386	struct ib_uverbs_mcast_entry *mcast;
2387	int                           ret;
2388
2389	if (copy_from_user(&cmd, buf, sizeof cmd))
2390		return -EFAULT;
2391
2392	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
2393	if (!qp)
2394		return -EINVAL;
2395
2396	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
2397
2398	list_for_each_entry(mcast, &obj->mcast_list, list)
2399		if (cmd.mlid == mcast->lid &&
2400		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2401			ret = 0;
2402			goto out_put;
2403		}
2404
2405	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
2406	if (!mcast) {
2407		ret = -ENOMEM;
2408		goto out_put;
2409	}
2410
2411	mcast->lid = cmd.mlid;
2412	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);
2413
2414	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
2415	if (!ret)
2416		list_add_tail(&mcast->list, &obj->mcast_list);
2417	else
2418		kfree(mcast);
2419
2420out_put:
2421	put_qp_write(qp);
2422
2423	return ret ? ret : in_len;
2424}
2425
2426ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
2427			       const char __user *buf, int in_len,
2428			       int out_len)
2429{
2430	struct ib_uverbs_detach_mcast cmd;
2431	struct ib_uqp_object         *obj;
2432	struct ib_qp                 *qp;
2433	struct ib_uverbs_mcast_entry *mcast;
2434	int                           ret = -EINVAL;
2435
2436	if (copy_from_user(&cmd, buf, sizeof cmd))
2437		return -EFAULT;
2438
2439	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
2440	if (!qp)
2441		return -EINVAL;
2442
2443	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
2444	if (ret)
2445		goto out_put;
2446
2447	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
2448
2449	list_for_each_entry(mcast, &obj->mcast_list, list)
2450		if (cmd.mlid == mcast->lid &&
2451		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2452			list_del(&mcast->list);
2453			kfree(mcast);
2454			break;
2455		}
2456
2457out_put:
2458	put_qp_write(qp);
2459
2460	return ret ? ret : in_len;
2461}
2462
2463static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
2464				struct ib_uverbs_create_xsrq *cmd,
2465				struct ib_udata *udata)
2466{
2467	struct ib_uverbs_create_srq_resp resp;
2468	struct ib_usrq_object           *obj;
2469	struct ib_pd                    *pd;
2470	struct ib_srq                   *srq;
2471	struct ib_uobject               *uninitialized_var(xrcd_uobj);
2472	struct ib_srq_init_attr          attr;
2473	int ret;
2474
2475	obj = kmalloc(sizeof *obj, GFP_KERNEL);
2476	if (!obj)
2477		return -ENOMEM;
2478
2479	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_key);
2480	down_write(&obj->uevent.uobject.mutex);
2481
2482	pd  = idr_read_pd(cmd->pd_handle, file->ucontext);
2483	if (!pd) {
2484		ret = -EINVAL;
2485		goto err;
2486	}
2487
2488	if (cmd->srq_type == IB_SRQT_XRC) {
2489		attr.ext.xrc.cq  = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
2490		if (!attr.ext.xrc.cq) {
2491			ret = -EINVAL;
2492			goto err_put_pd;
2493		}
2494
2495		attr.ext.xrc.xrcd  = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
2496		if (!attr.ext.xrc.xrcd) {
2497			ret = -EINVAL;
2498			goto err_put_cq;
2499		}
2500
2501		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
2502		atomic_inc(&obj->uxrcd->refcnt);
2503	}
2504
2505	attr.event_handler  = ib_uverbs_srq_event_handler;
2506	attr.srq_context    = file;
2507	attr.srq_type       = cmd->srq_type;
2508	attr.attr.max_wr    = cmd->max_wr;
2509	attr.attr.max_sge   = cmd->max_sge;
2510	attr.attr.srq_limit = cmd->srq_limit;
2511
2512	obj->uevent.events_reported = 0;
2513	INIT_LIST_HEAD(&obj->uevent.event_list);
2514
2515	srq = pd->device->create_srq(pd, &attr, udata);
2516	if (IS_ERR(srq)) {
2517		ret = PTR_ERR(srq);
2518		goto err_put;
2519	}
2520
2521	srq->device        = pd->device;
2522	srq->pd            = pd;
2523	srq->srq_type	   = cmd->srq_type;
2524	srq->uobject       = &obj->uevent.uobject;
2525	srq->event_handler = attr.event_handler;
2526	srq->srq_context   = attr.srq_context;
2527
2528	if (cmd->srq_type == IB_SRQT_XRC) {
2529		srq->ext.xrc.cq   = attr.ext.xrc.cq;
2530		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
2531		atomic_inc(&attr.ext.xrc.cq->usecnt);
2532		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
2533	}
2534
2535	atomic_inc(&pd->usecnt);
2536	atomic_set(&srq->usecnt, 0);
2537
2538	obj->uevent.uobject.object = srq;
2539	ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
2540	if (ret)
2541		goto err_destroy;
2542
2543	memset(&resp, 0, sizeof resp);
2544	resp.srq_handle = obj->uevent.uobject.id;
2545	resp.max_wr     = attr.attr.max_wr;
2546	resp.max_sge    = attr.attr.max_sge;
2547	if (cmd->srq_type == IB_SRQT_XRC)
2548		resp.srqn = srq->ext.xrc.srq_num;
2549
2550	if (copy_to_user((void __user *) (unsigned long) cmd->response,
2551			 &resp, sizeof resp)) {
2552		ret = -EFAULT;
2553		goto err_copy;
2554	}
2555
2556	if (cmd->srq_type == IB_SRQT_XRC) {
2557		put_uobj_read(xrcd_uobj);
2558		put_cq_read(attr.ext.xrc.cq);
2559	}
2560	put_pd_read(pd);
2561
2562	mutex_lock(&file->mutex);
2563	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
2564	mutex_unlock(&file->mutex);
2565
2566	obj->uevent.uobject.live = 1;
2567
2568	up_write(&obj->uevent.uobject.mutex);
2569
2570	return 0;
2571
2572err_copy:
2573	idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
2574
2575err_destroy:
2576	ib_destroy_srq(srq);
2577
2578err_put:
2579	if (cmd->srq_type == IB_SRQT_XRC) {
2580		atomic_dec(&obj->uxrcd->refcnt);
2581		put_uobj_read(xrcd_uobj);
2582	}
2583
2584err_put_cq:
2585	if (cmd->srq_type == IB_SRQT_XRC)
2586		put_cq_read(attr.ext.xrc.cq);
2587
2588err_put_pd:
2589	put_pd_read(pd);
2590
2591err:
2592	put_uobj_write(&obj->uevent.uobject);
2593	return ret;
2594}
2595
2596ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
2597			     const char __user *buf, int in_len,
2598			     int out_len)
2599{
2600	struct ib_uverbs_create_srq      cmd;
2601	struct ib_uverbs_create_xsrq     xcmd;
2602	struct ib_uverbs_create_srq_resp resp;
2603	struct ib_udata                  udata;
2604	int ret;
2605
2606	if (out_len < sizeof resp)
2607		return -ENOSPC;
2608
2609	if (copy_from_user(&cmd, buf, sizeof cmd))
2610		return -EFAULT;
2611
2612	xcmd.response	 = cmd.response;
2613	xcmd.user_handle = cmd.user_handle;
2614	xcmd.srq_type	 = IB_SRQT_BASIC;
2615	xcmd.pd_handle	 = cmd.pd_handle;
2616	xcmd.max_wr	 = cmd.max_wr;
2617	xcmd.max_sge	 = cmd.max_sge;
2618	xcmd.srq_limit	 = cmd.srq_limit;
2619
2620	INIT_UDATA(&udata, buf + sizeof cmd,
2621		   (unsigned long) cmd.response + sizeof resp,
2622		   in_len - sizeof cmd, out_len - sizeof resp);
2623
2624	ret = __uverbs_create_xsrq(file, &xcmd, &udata);
2625	if (ret)
2626		return ret;
2627
2628	return in_len;
2629}
2630
2631ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
2632			      const char __user *buf, int in_len, int out_len)
2633{
2634	struct ib_uverbs_create_xsrq     cmd;
2635	struct ib_uverbs_create_srq_resp resp;
2636	struct ib_udata                  udata;
2637	int ret;
2638
2639	if (out_len < sizeof resp)
2640		return -ENOSPC;
2641
2642	if (copy_from_user(&cmd, buf, sizeof cmd))
2643		return -EFAULT;
2644
2645	INIT_UDATA(&udata, buf + sizeof cmd,
2646		   (unsigned long) cmd.response + sizeof resp,
2647		   in_len - sizeof cmd, out_len - sizeof resp);
2648
2649	ret = __uverbs_create_xsrq(file, &cmd, &udata);
2650	if (ret)
2651		return ret;
2652
2653	return in_len;
2654}
2655
2656ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
2657			     const char __user *buf, int in_len,
2658			     int out_len)
2659{
2660	struct ib_uverbs_modify_srq cmd;
2661	struct ib_udata             udata;
2662	struct ib_srq              *srq;
2663	struct ib_srq_attr          attr;
2664	int                         ret;
2665
2666	if (copy_from_user(&cmd, buf, sizeof cmd))
2667		return -EFAULT;
2668
2669	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
2670		   out_len);
2671
2672	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
2673	if (!srq)
2674		return -EINVAL;
2675
2676	attr.max_wr    = cmd.max_wr;
2677	attr.srq_limit = cmd.srq_limit;
2678
2679	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);
2680
2681	put_srq_read(srq);
2682
2683	return ret ? ret : in_len;
2684}
2685
2686ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
2687			    const char __user *buf,
2688			    int in_len, int out_len)
2689{
2690	struct ib_uverbs_query_srq      cmd;
2691	struct ib_uverbs_query_srq_resp resp;
2692	struct ib_srq_attr              attr;
2693	struct ib_srq                   *srq;
2694	int                             ret;
2695
2696	if (out_len < sizeof resp)
2697		return -ENOSPC;
2698
2699	if (copy_from_user(&cmd, buf, sizeof cmd))
2700		return -EFAULT;
2701
2702	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
2703	if (!srq)
2704		return -EINVAL;
2705
2706	ret = ib_query_srq(srq, &attr);
2707
2708	put_srq_read(srq);
2709
2710	if (ret)
2711		return ret;
2712
2713	memset(&resp, 0, sizeof resp);
2714
2715	resp.max_wr    = attr.max_wr;
2716	resp.max_sge   = attr.max_sge;
2717	resp.srq_limit = attr.srq_limit;
2718
2719	if (copy_to_user((void __user *) (unsigned long) cmd.response,
2720			 &resp, sizeof resp))
2721		return -EFAULT;
2722
2723	return in_len;
2724}
2725
2726ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
2727			      const char __user *buf, int in_len,
2728			      int out_len)
2729{
2730	struct ib_uverbs_destroy_srq      cmd;
2731	struct ib_uverbs_destroy_srq_resp resp;
2732	struct ib_uobject		 *uobj;
2733	struct ib_srq               	 *srq;
2734	struct ib_uevent_object        	 *obj;
2735	int                         	  ret = -EINVAL;
2736
2737	if (copy_from_user(&cmd, buf, sizeof cmd))
2738		return -EFAULT;
2739
2740	uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
2741	if (!uobj)
2742		return -EINVAL;
2743	srq = uobj->object;
2744	obj = container_of(uobj, struct ib_uevent_object, uobject);
2745
2746	ret = ib_destroy_srq(srq);
2747	if (!ret)
2748		uobj->live = 0;
2749
2750	put_uobj_write(uobj);
2751
2752	if (ret)
2753		return ret;
2754
2755	idr_remove_uobj(&ib_uverbs_srq_idr, uobj);
2756
2757	mutex_lock(&file->mutex);
2758	list_del(&uobj->list);
2759	mutex_unlock(&file->mutex);
2760
2761	ib_uverbs_release_uevent(file, obj);
2762
2763	memset(&resp, 0, sizeof resp);
2764	resp.events_reported = obj->events_reported;
2765
2766	put_uobj(uobj);
2767
2768	if (copy_to_user((void __user *) (unsigned long) cmd.response,
2769			 &resp, sizeof resp))
2770		ret = -EFAULT;
2771
2772	return ret ? ret : in_len;
2773}
2774