mthca_srq.c revision fd02e8038eb943755e8727a0ea193c037a51714f
/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_srq.c 3047 2005-08-10 03:59:35Z roland $
 */

#include <linux/slab.h>
#include <linux/string.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

enum {
	MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
};
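
/*
 * SRQ buffers up to MTHCA_MAX_DIRECT_SRQ_SIZE are allocated as one
 * physically contiguous ("direct") chunk; anything larger becomes a
 * list of single pages (see the two cases in get_wqe() below).  As
 * an illustration only, with 4 KB pages the threshold is 16 KB,
 * e.g. 256 WQEs of 64 bytes each.
 */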

struct mthca_tavor_srq_context {
	__be64 wqe_base_ds;	/* low 6 bits hold the descriptor size */
	__be32 state_pd;
	__be32 lkey;
	__be32 uar;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u32    reserved[2];
};

struct mthca_arbel_srq_context {
	__be32 state_logsize_srqn;
	__be32 lkey;
	__be32 db_index;
	__be32 logstride_usrpage;
	__be64 wqe_base;
	__be32 eq_pd;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u16    reserved1;
	__be16 wqe_counter;
	u32    reserved2[3];
};

static void *get_wqe(struct mthca_srq *srq, int n)
{
	if (srq->is_direct)
		return srq->queue.direct.buf + (n << srq->wqe_shift);
	else
		return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
			((n << srq->wqe_shift) & (PAGE_SIZE - 1));
}
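
/*
 * Worked example of the indexing above (illustrative values only):
 * with wqe_shift == 6 (64-byte WQEs) and 4 KB pages, WQE 100 lives
 * at byte offset 100 << 6 == 6400, i.e. in page_list[1] at offset
 * 2304; a direct buffer is simply base + 6400.
 */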

/*
 * Return a pointer to the location within a WQE that we're using as a
 * link when the WQE is in the free list.  We use the imm field
 * because in the Tavor case, posting a WQE may overwrite the next
 * segment of the previous WQE, but a receive WQE will never touch the
 * imm field.  This avoids corrupting our free list if the previous
 * WQE has already completed and been put on the free list when we
 * post the next WQE.
 */
static inline int *wqe_to_link(void *wqe)
{
	return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
}
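
/*
 * The free list is threaded through the WQEs themselves.  For
 * example, right after mthca_alloc_srq_buf() a four-WQE SRQ looks
 * like:
 *
 *	first_free = 0
 *	WQE 0.imm -> 1, WQE 1.imm -> 2, WQE 2.imm -> 3, WQE 3.imm -> -1
 *	last_free = 3
 *
 * The post_recv paths pop WQEs at first_free, and
 * mthca_free_srq_wqe() appends them back at last_free, so entries
 * are recycled in FIFO order.
 */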

static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_tavor_srq_context *context)
{
	memset(context, 0, sizeof *context);

	context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
	context->state_pd    = cpu_to_be32(pd->pd_num);
	context->lkey        = cpu_to_be32(srq->mr.ibmr.lkey);

	if (pd->ibpd.uobject)
		context->uar =
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->uar = cpu_to_be32(dev->driver_uar.index);
}
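
/*
 * The descriptor-size field of wqe_base_ds is in units of 16 bytes,
 * so "1 << (srq->wqe_shift - 4)" above is just the WQE stride
 * divided by 16: e.g. (illustrative) wqe_shift == 6 means 64-byte
 * WQEs and a ds value of 4.
 */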

static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_arbel_srq_context *context)
{
	int logsize;

	memset(context, 0, sizeof *context);

	logsize = long_log2(srq->max) + srq->wqe_shift;
	context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
	context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
	context->db_index = cpu_to_be32(srq->db_index);
	context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
	if (pd->ibpd.uobject)
		context->logstride_usrpage |=
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
	context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
}
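
/*
 * For Arbel, logsize is log2 of the whole buffer in bytes
 * (log2(#WQEs) + log2(stride)), stored above the SRQ number in
 * bits 31:24.  The log of the stride, again relative to the 16-byte
 * basic unit, sits in the top bits of logstride_usrpage above the
 * UAR page index.  E.g. (illustrative) 128 WQEs of 64 bytes give
 * logsize == 13 and a logstride field of 2.
 */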

static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
{
	mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
		       srq->is_direct, &srq->mr);
	kfree(srq->wrid);
}

static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
			       struct mthca_srq *srq)
{
	struct mthca_data_seg *scatter;
	void *wqe;
	int err;
	int i;
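
	/*
	 * For a userspace SRQ the queue buffer and wrid bookkeeping
	 * live in userspace; the kernel only programs the hardware
	 * context, so there is nothing to allocate here.
	 */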
	if (pd->ibpd.uobject)
		return 0;

	srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
	if (!srq->wrid)
		return -ENOMEM;

	err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
			      MTHCA_MAX_DIRECT_SRQ_SIZE,
			      &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
	if (err) {
		kfree(srq->wrid);
		return err;
	}

	/*
	 * Now initialize the SRQ buffer so that all of the WQEs are
	 * linked into the list of free WQEs.  In addition, set the
	 * scatter list L_Keys to the sentry value of 0x100.
	 */
	for (i = 0; i < srq->max; ++i) {
		wqe = get_wqe(srq, i);

		*wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1;

		for (scatter = wqe + sizeof (struct mthca_next_seg);
		     (void *) scatter < wqe + (1 << srq->wqe_shift);
		     ++scatter)
			scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
	}

	srq->last = get_wqe(srq, srq->max - 1);

	return 0;
}

int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
		    struct ib_srq_attr *attr, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	u8 status;
	int ds;
	int err;

	/* Sanity check SRQ size before proceeding */
	if (attr->max_wr  > dev->limits.max_srq_wqes ||
	    attr->max_sge > dev->limits.max_sg)
		return -EINVAL;

	srq->max      = attr->max_wr;
	srq->max_gs   = attr->max_sge;
	srq->counter  = 0;

	if (mthca_is_memfree(dev))
		srq->max = roundup_pow_of_two(srq->max + 1);

	ds = max(64UL,
		 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
				    srq->max_gs * sizeof (struct mthca_data_seg)));
	srq->wqe_shift = long_log2(ds);

	srq->srqn = mthca_alloc(&dev->srq_table.alloc);
	if (srq->srqn == -1)
		return -ENOMEM;

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
		if (err)
			goto err_out;

		if (!pd->ibpd.uobject) {
			srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
						       srq->srqn, &srq->db);
			if (srq->db_index < 0) {
				err = -ENOMEM;
				goto err_out_icm;
			}
		}
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_out_db;
	}

	err = mthca_alloc_srq_buf(dev, pd, srq);
	if (err)
		goto err_out_mailbox;

	spin_lock_init(&srq->lock);
	atomic_set(&srq->refcount, 1);
	init_waitqueue_head(&srq->wait);

	if (mthca_is_memfree(dev))
		mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
	else
		mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);

	err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);

	if (err) {
		mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
		goto err_out_free_buf;
	}
	if (status) {
		mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_free_buf;
	}

	spin_lock_irq(&dev->srq_table.lock);
	err = mthca_array_set(&dev->srq_table.srq,
			      srq->srqn & (dev->limits.num_srqs - 1),
			      srq);
	spin_unlock_irq(&dev->srq_table.lock);
	if (err)
		goto err_out_free_srq;

	mthca_free_mailbox(dev, mailbox);

	srq->first_free = 0;
	srq->last_free  = srq->max - 1;

	attr->max_wr    = (mthca_is_memfree(dev)) ? srq->max - 1 : srq->max;
	attr->max_sge   = srq->max_gs;

	return 0;

err_out_free_srq:
	/* Preserve err from above; only warn about cleanup failures. */
	if (mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status))
		mthca_warn(dev, "HW2SW_SRQ failed\n");
	else if (status)
		mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);

err_out_free_buf:
	if (!pd->ibpd.uobject)
		mthca_free_srq_buf(dev, srq);

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_db:
	if (!pd->ibpd.uobject && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);

err_out_icm:
	mthca_table_put(dev, dev->srq_table.table, srq->srqn);

err_out:
	mthca_free(&dev->srq_table.alloc, srq->srqn);

	return err;
}
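
/*
 * A worked example of the sizing above (illustrative numbers): on a
 * mem-free HCA, a request for max_wr = 100 is rounded up to
 * srq->max = roundup_pow_of_two(101) = 128, and srq->max - 1 = 127
 * is reported back to the caller.  With max_sge = 4 the descriptor
 * needs 16 + 4 * 16 = 80 bytes, rounded up to a 128-byte stride,
 * i.e. wqe_shift = 7.
 */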

void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	int err;
	u8 status;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
		return;
	}

	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);

	spin_lock_irq(&dev->srq_table.lock);
	mthca_array_clear(&dev->srq_table.srq,
			  srq->srqn & (dev->limits.num_srqs - 1));
	spin_unlock_irq(&dev->srq_table.lock);

	atomic_dec(&srq->refcount);
	wait_event(srq->wait, !atomic_read(&srq->refcount));

	if (!srq->ibsrq.uobject) {
		mthca_free_srq_buf(dev, srq);
		if (mthca_is_memfree(dev))
			mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
	}

	mthca_table_put(dev, dev->srq_table.table, srq->srqn);
	mthca_free(&dev->srq_table.alloc, srq->srqn);
	mthca_free_mailbox(dev, mailbox);
}
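
/*
 * Note the teardown ordering above: the SRQ is cleared from the srq
 * table first so that no new event can look it up, then the initial
 * reference taken in mthca_alloc_srq() is dropped and we sleep until
 * any handler still running in mthca_srq_event() releases its
 * reference, and only then is the buffer freed.
 */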

int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	int ret;
	u8 status;

	/* We don't support resizing SRQs (yet?) */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
		if (ret)
			return ret;
		if (status)
			return -EINVAL;
	}

	return 0;
}
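
/*
 * Arming with IB_SRQ_LIMIT asks the HCA to raise an asynchronous
 * SRQ limit event (dispatched through mthca_srq_event() below) once
 * the number of receive WQEs left in the SRQ drops below
 * attr->srq_limit.  Per the IB spec this is one-shot: the consumer
 * must re-arm after each event.
 */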

int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	struct mthca_mailbox *mailbox;
	struct mthca_arbel_srq_context *arbel_ctx;
	struct mthca_tavor_srq_context *tavor_ctx;
	u8 status;
	int err;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox, &status);
	if (err)
		goto out;

	if (mthca_is_memfree(dev)) {
		arbel_ctx = mailbox->buf;
		srq_attr->srq_limit = be16_to_cpu(arbel_ctx->limit_watermark);
	} else {
		tavor_ctx = mailbox->buf;
		srq_attr->srq_limit = be16_to_cpu(tavor_ctx->limit_watermark);
	}

	srq_attr->max_wr  = (mthca_is_memfree(dev)) ? srq->max - 1 : srq->max;
	srq_attr->max_sge = srq->max_gs;

out:
	mthca_free_mailbox(dev, mailbox);

	return err;
}

void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
		     enum ib_event_type event_type)
{
	struct mthca_srq *srq;
	struct ib_event event;

	spin_lock(&dev->srq_table.lock);
	srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
	if (srq)
		atomic_inc(&srq->refcount);
	spin_unlock(&dev->srq_table.lock);

	if (!srq) {
		mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	if (!srq->ibsrq.event_handler)
		goto out;

	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.srq = &srq->ibsrq;
	srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);

out:
	if (atomic_dec_and_test(&srq->refcount))
		wake_up(&srq->wait);
}

/*
 * This function must be called with IRQs disabled.
 */
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
{
	int ind;

	ind = wqe_addr >> srq->wqe_shift;

	spin_lock(&srq->lock);

	if (likely(srq->first_free >= 0))
		*wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
	else
		srq->first_free = ind;

	*wqe_to_link(get_wqe(srq, ind)) = -1;
	srq->last_free = ind;

	spin_unlock(&srq->lock);
}
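
/*
 * The IRQs-disabled requirement above matters because srq->lock is
 * also taken with spin_lock_irqsave() in the post_recv paths below:
 * the plain spin_lock() here is only deadlock-safe if the caller
 * (the CQ poll path in this driver) has already disabled interrupts.
 */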

int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	__be32 doorbell[2];
	unsigned long flags;
	int err = 0;
	int first_ind;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&srq->lock, flags);

	first_ind = srq->first_free;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
			nreq = 0;

			doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift);
			doorbell[1] = cpu_to_be32(srq->srqn << 8);

			/*
			 * Make sure that descriptors are written
			 * before doorbell is rung.
			 */
			wmb();

			mthca_write64(doorbell,
				      dev->kar + MTHCA_RECEIVE_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

			first_ind = srq->first_free;
		}

		ind = srq->first_free;

		if (ind < 0) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		wqe       = get_wqe(srq, ind);
		next_ind  = *wqe_to_link(wqe);

		if (next_ind < 0) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		prev_wqe  = srq->last;
		srq->last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			srq->last = prev_wqe;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32((ind << srq->wqe_shift) | 1);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;
	}

	if (likely(nreq)) {
		doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift);
		doorbell[1] = cpu_to_be32((srq->srqn << 8) | nreq);

		/*
		 * Make sure that descriptors are written before
		 * doorbell is rung.
		 */
		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}
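
/*
 * Layout of the Tavor receive doorbell rung above, as used by this
 * code: word 0 is the byte offset of the first WQE in the chain
 * (first_ind << wqe_shift), and word 1 is (srqn << 8) | nreq.  In
 * the batched flush inside the loop the count field is written as 0,
 * which (the count being one byte wide) stands for a full batch of
 * MTHCA_TAVOR_MAX_WQES_PER_RECV_DB WQEs.
 */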

int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;

	spin_lock_irqsave(&srq->lock, flags);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		ind = srq->first_free;

		if (ind < 0) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		wqe       = get_wqe(srq, ind);
		next_ind  = *wqe_to_link(wqe);

		if (next_ind < 0) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		((struct mthca_next_seg *) wqe)->nda_op =
			cpu_to_be32((next_ind << srq->wqe_shift) | 1);
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;
	}

	if (likely(nreq)) {
		srq->counter += nreq;

		/*
		 * Make sure that descriptors are written before
		 * we write doorbell record.
		 */
		wmb();
		*srq->db = cpu_to_be32(srq->counter);
	}

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}
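
/*
 * Unlike the Tavor path above, mem-free (Arbel) hardware needs no
 * MMIO doorbell for SRQ receives: the HCA reads the doorbell record
 * (*srq->db) in host memory, so posting boils down to "write the
 * WQEs, wmb(), bump the counter".
 */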

int __devinit mthca_init_srq_table(struct mthca_dev *dev)
{
	int err;

	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return 0;

	spin_lock_init(&dev->srq_table.lock);

	err = mthca_alloc_init(&dev->srq_table.alloc,
			       dev->limits.num_srqs,
			       dev->limits.num_srqs - 1,
			       dev->limits.reserved_srqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->srq_table.srq,
			       dev->limits.num_srqs);
	if (err)
		mthca_alloc_cleanup(&dev->srq_table.alloc);

	return err;
}
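
/*
 * dev->limits.num_srqs - 1 is passed to mthca_alloc_init() as the
 * allocator mask, which assumes num_srqs is a power of two; the same
 * mask is used throughout this file to index srq_table.srq by SRQ
 * number.
 */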

void __devexit mthca_cleanup_srq_table(struct mthca_dev *dev)
{
	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return;

	mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
	mthca_alloc_cleanup(&dev->srq_table.alloc);
}