verbs.c revision 7ce5eacb45a7c819a6bec6ed486f27db9aab6ab6
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: verbs.c 1349 2004-12-16 21:09:43Z roland $
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);
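
/*
 * Usage sketch (hypothetical, not part of the original file): the
 * multiplier expresses a link rate as a multiple of the 2.5 Gbps base
 * rate, so a round-trip conversion looks like:
 *
 *	int mult = ib_rate_to_mult(IB_RATE_10_GBPS);
 *	enum ib_rate rate = mult_to_ib_rate(mult);
 *
 * Here mult is 4 and rate is IB_RATE_10_GBPS again; an unknown rate
 * yields -1, and an unknown multiplier falls back to
 * IB_RATE_PORT_CURRENT.
 */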

enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	switch (node_type) {
	case RDMA_NODE_IB_CA:
	case RDMA_NODE_IB_SWITCH:
	case RDMA_NODE_IB_ROUTER:
		return RDMA_TRANSPORT_IB;
	case RDMA_NODE_RNIC:
		return RDMA_TRANSPORT_IWARP;
	default:
		BUG();
		return 0;
	}
}
EXPORT_SYMBOL(rdma_node_get_transport);

/* Protection domains */

struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = device->alloc_pd(device, NULL, NULL);

	if (!IS_ERR(pd)) {
		pd->device  = device;
		pd->uobject = NULL;
		atomic_set(&pd->usecnt, 0);
	}

	return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);

int ib_dealloc_pd(struct ib_pd *pd)
{
	if (atomic_read(&pd->usecnt))
		return -EBUSY;

	return pd->device->dealloc_pd(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd);
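
/*
 * Lifecycle sketch (hypothetical consumer code, assuming a valid
 * "device"): a PD is allocated once and may only be freed after every
 * object created against it is gone; otherwise ib_dealloc_pd() fails
 * with -EBUSY because of the usecnt counter maintained above.
 *
 *	struct ib_pd *pd = ib_alloc_pd(device);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	... create and destroy AHs, QPs, MRs against pd ...
 *	ret = ib_dealloc_pd(pd);
 */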

/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);

int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
		       struct ib_grh *grh, struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;

	/*
	 * Build the AH by reversing the received path: the sender's
	 * source LID (and source GID, if a GRH was present) become
	 * our destination.
	 */
	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = wc->slid;
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = grh->sgid;

		ret = ib_find_cached_gid(device, &grh->dgid, &port_num,
					 &gid_index);
		if (ret)
			return ret;

		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		ah_attr->grh.hop_limit = 0xFF;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);
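
/*
 * Example (hypothetical): a UD service answering a datagram can turn
 * the work completion of the received packet directly into an address
 * handle for the reply; "wc", "grh" and "port_num" come from the
 * receive path:
 *
 *	struct ib_ah *ah = ib_create_ah_from_wc(pd, wc, grh, port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *
 * This is, for instance, how the MAD layer constructs AHs for MAD
 * replies.
 */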

int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device	   = pd->device;
		srq->pd		   = pd;
		srq->uobject	   = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);

int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;

	ret = srq->device->destroy_srq(srq);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);
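
/*
 * Creation sketch (hypothetical handler and sizes): attr.max_wr and
 * attr.max_sge size the shared receive queue; the low-watermark limit
 * event is typically armed later via ib_modify_srq() with
 * IB_SRQ_LIMIT set in the mask.
 *
 *	struct ib_srq_init_attr init = {
 *		.event_handler	= my_srq_event_handler,
 *		.srq_context	= my_ctx,
 *		.attr		= { .max_wr = 256, .max_sge = 1 },
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &init);
 */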

/* Queue pairs */

struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *qp;

	qp = pd->device->create_qp(pd, qp_init_attr, NULL);

	if (!IS_ERR(qp)) {
		qp->device	  = pd->device;
		qp->pd		  = pd;
		qp->send_cq	  = qp_init_attr->send_cq;
		qp->recv_cq	  = qp_init_attr->recv_cq;
		qp->srq		  = qp_init_attr->srq;
		qp->uobject	  = NULL;
		qp->event_handler = qp_init_attr->event_handler;
		qp->qp_context	  = qp_init_attr->qp_context;
		qp->qp_type	  = qp_init_attr->qp_type;
		atomic_inc(&pd->usecnt);
		atomic_inc(&qp_init_attr->send_cq->usecnt);
		atomic_inc(&qp_init_attr->recv_cq->usecnt);
		if (qp_init_attr->srq)
			atomic_inc(&qp_init_attr->srq->usecnt);
	}

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);
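
/*
 * Creation sketch (hypothetical handler names and sizes): an RC QP
 * needs both CQs up front, and cap sizes the work queues:
 *
 *	struct ib_qp_init_attr init = {
 *		.event_handler	= my_qp_event_handler,
 *		.send_cq	= send_cq,
 *		.recv_cq	= recv_cq,
 *		.cap		= {
 *			.max_send_wr	= 64,
 *			.max_recv_wr	= 64,
 *			.max_send_sge	= 1,
 *			.max_recv_sge	= 1,
 *		},
 *		.sq_sig_type	= IB_SIGNAL_ALL_WR,
 *		.qp_type	= IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init);
 *
 * A freshly created QP starts in the RESET state; qp_state_table
 * below defines which ib_modify_qp() transitions are then legal.
 */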

/*
 * qp_state_table encodes the QP state machine from the IB spec: for
 * each (current state, next state) pair it records whether the
 * transition is legal and, per QP type, which attributes are required
 * and which are optional in the ib_modify_qp() attribute mask.
 */
static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_RAW_ETY + 1];
	enum ib_qp_attr_mask	opt_param[IB_QPT_RAW_ETY + 1];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV			|
						IB_QP_PATH_MTU			|
						IB_QP_DEST_QPN			|
						IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV			|
						IB_QP_PATH_MTU			|
						IB_QP_DEST_QPN			|
						IB_QP_RQ_PSN			|
						IB_QP_MAX_DEST_RD_ATOMIC	|
						IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				 [IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						 IB_QP_QKEY),
				 [IB_QPT_UC]  = (IB_QP_ALT_PATH			|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_PKEY_INDEX),
				 [IB_QPT_RC]  = (IB_QP_ALT_PATH			|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_PKEY_INDEX),
				 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						 IB_QP_QKEY),
				 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						 IB_QP_QKEY),
			 }
		}
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT			|
						IB_QP_RETRY_CNT			|
						IB_QP_RNR_RETRY			|
						IB_QP_SQ_PSN			|
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				 [IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						 IB_QP_QKEY),
				 [IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						 IB_QP_ALT_PATH			|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_PATH_MIG_STATE),
				 [IB_QPT_RC]  = (IB_QP_CUR_STATE		|
						 IB_QP_ALT_PATH			|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_MIN_RNR_TIMER		|
						 IB_QP_PATH_MIG_STATE),
				 [IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						 IB_QP_QKEY),
				 [IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						 IB_QP_QKEY),
			 }
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_ALT_PATH			|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_ALT_PATH			|
						IB_QP_PATH_MIG_STATE		|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_MIN_RNR_TIMER		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_AV			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PKEY_INDEX		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_PORT			|
						IB_QP_AV			|
						IB_QP_TIMEOUT			|
						IB_QP_RETRY_CNT			|
						IB_QP_RNR_RETRY			|
						IB_QP_MAX_QP_RD_ATOMIC		|
						IB_QP_MAX_DEST_RD_ATOMIC	|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PKEY_INDEX		|
						IB_QP_MIN_RNR_TIMER		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 }
	}
};

int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	if (mask & IB_QP_CUR_STATE  &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
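
/*
 * Example check (hypothetical mask): per the table above, RESET->INIT
 * for an RC QP requires exactly IB_QP_PKEY_INDEX, IB_QP_PORT and
 * IB_QP_ACCESS_FLAGS alongside IB_QP_STATE, so this returns 1:
 *
 *	ok = ib_modify_qp_is_ok(IB_QPS_RESET, IB_QPS_INIT, IB_QPT_RC,
 *				IB_QP_STATE | IB_QP_PKEY_INDEX |
 *				IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 *
 * Dropping IB_QP_PORT from the mask, or adding an attribute the table
 * does not list for this transition, makes it return 0.
 */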

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	return qp->device->modify_qp(qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);
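
/*
 * Transition sketch (hypothetical values): the corresponding
 * RESET->INIT call for an RC QP fills in the required attributes and
 * names them in the mask:
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state	 = IB_QPS_INIT,
 *		.pkey_index	 = 0,
 *		.port_num	 = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *	ret = ib_modify_qp(qp, &attr,
 *			   IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */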

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	int ret;

	pd  = qp->pd;
	scq = qp->send_cq;
	rcq = qp->recv_cq;
	srq = qp->srq;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		atomic_dec(&scq->usecnt);
		atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe, int comp_vector)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cqe, comp_vector, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device        = device;
		cq->uobject       = NULL;
		cq->comp_handler  = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context    = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);
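
/*
 * Creation sketch (hypothetical handler): comp_handler is invoked,
 * typically from interrupt context, when a requested completion event
 * fires; comp_vector selects among the device's completion vectors
 * (0 is always valid). Notification must be armed explicitly:
 *
 *	struct ib_cq *cq = ib_create_cq(device, my_comp_handler,
 *					NULL, my_ctx, 256, 0);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 */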

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *mr;

	mr = pd->device->get_dma_mr(pd, mr_access_flags);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);
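
/*
 * Usage sketch: a DMA MR covers the device's whole DMA address space
 * rather than one buffer, so kernel consumers commonly create a single
 * MR per PD and use its lkey in their scatter/gather entries:
 *
 *	struct ib_mr *mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *	sge.lkey = mr->lkey;
 */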

struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start)
{
	struct ib_mr *mr;

	if (!pd->device->reg_phys_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
				     mr_access_flags, iova_start);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_reg_phys_mr);

int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start)
{
	struct ib_pd *old_pd;
	int ret;

	if (!mr->device->rereg_phys_mr)
		return -ENOSYS;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	old_pd = mr->pd;

	ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
					phys_buf_array, num_phys_buf,
					mr_access_flags, iova_start);

	if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
		atomic_dec(&old_pd->usecnt);
		atomic_inc(&pd->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_rereg_phys_mr);

int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	return mr->device->query_mr ?
		mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	pd = mr->pd;
	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

/* Memory windows */

struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
{
	struct ib_mw *mw;

	if (!pd->device->alloc_mw)
		return ERR_PTR(-ENOSYS);

	mw = pd->device->alloc_mw(pd);
	if (!IS_ERR(mw)) {
		mw->device  = pd->device;
		mw->pd      = pd;
		mw->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);

int ib_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd;
	int ret;

	pd = mw->pd;
	ret = mw->device->dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);

/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd     = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	/*
	 * The whole list is handed to the first FMR's device, so every
	 * FMR on the list must belong to the same device.
	 */
	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);
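
/*
 * Allocation sketch (hypothetical limits): fmr_attr bounds how the FMR
 * may later be mapped; a page_shift of 12 means 4 KB pages. Mapping
 * and unmapping then go through ib_map_phys_fmr() and ib_unmap_fmr():
 *
 *	struct ib_fmr_attr attr = {
 *		.max_pages  = 64,
 *		.max_maps   = 32,
 *		.page_shift = 12,
 *	};
 *	struct ib_fmr *fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE,
 *					  &attr);
 */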

/* Multicast groups */

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	if (!qp->device->attach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	return qp->device->attach_mcast(qp, gid, lid);
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	if (!qp->device->detach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	return qp->device->detach_mcast(qp, gid, lid);
}
EXPORT_SYMBOL(ib_detach_mcast);
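
/*
 * Attach sketch: only UD QPs may join a group, and the GID must be a
 * multicast GID (first byte 0xff), which is what the checks above
 * enforce. A consumer that resolved a group through an SA query
 * ("rec" being a hypothetical struct ib_sa_mcmember_rec) would do:
 *
 *	ret = ib_attach_mcast(qp, &rec.mgid, be16_to_cpu(rec.mlid));
 */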