srq.c revision fe66bb2db51c9847c23682ef9c140bab6e14b0fa
/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/srq.h>
#include <linux/export.h>
#include <linux/gfp.h>

#include "mlx4.h"
#include "icm.h"

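/*
 * Dispatch an asynchronous event to the SRQ's event handler.  The
 * reference taken under the table lock keeps the SRQ alive while the
 * handler runs; the matching decrement may complete the freeing path
 * waiting in mlx4_srq_free().
 */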
void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	struct mlx4_srq *srq;

	spin_lock(&srq_table->lock);

	srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1));
	if (srq)
		atomic_inc(&srq->refcount);

	spin_unlock(&srq_table->lock);

	if (!srq) {
		mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	srq->event(srq, event_type);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
}

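/*
 * Thin wrappers around the firmware commands that move an SRQ between
 * software and hardware ownership, arm its limit watermark, and query
 * its context.  All of them use MLX4_CMD_WRAPPED so they are routed
 * correctly in multi-function (SR-IOV) mode.
 */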
static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int srq_num)
{
	return mlx4_cmd(dev, mailbox->dma, srq_num, 0,
			MLX4_CMD_SW2HW_SRQ, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int srq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
			    mailbox ? 0 : 1, MLX4_CMD_HW2SW_SRQ,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark)
{
	return mlx4_cmd(dev, limit_watermark, srq_num, 0, MLX4_CMD_ARM_SRQ,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}

static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int srq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, srq_num, 0, MLX4_CMD_QUERY_SRQ,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

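/*
 * Low-level ICM allocation: reserve an SRQ number from the bitmap and
 * make sure the ICM pages backing its context and cMPT entries are
 * mapped.  Multi-function callers go through mlx4_srq_alloc_icm()
 * below instead.
 */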
int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	int err;

	*srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
	if (*srqn == -1)
		return -ENOMEM;

	err = mlx4_table_get(dev, &srq_table->table, *srqn);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn);
	if (err)
		goto err_put;
	return 0;

err_put:
	mlx4_table_put(dev, &srq_table->table, *srqn);

err_out:
	mlx4_bitmap_free(&srq_table->bitmap, *srqn);
	return err;
}

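/*
 * On a multi-function device, a slave reserves the SRQ with an
 * ALLOC_RES command and gets the SRQ number back in the immediate out
 * parameter; a native function allocates locally.
 */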
static int mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, 0, &out_param, RES_SRQ,
				   RES_OP_RESERVE_AND_MAP,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (!err)
			*srqn = get_param_l(&out_param);

		return err;
	}
	return __mlx4_srq_alloc_icm(dev, srqn);
}

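/*
 * Undo __mlx4_srq_alloc_icm(): release the ICM references and return
 * the SRQ number to the bitmap.
 */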
void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;

	mlx4_table_put(dev, &srq_table->cmpt_table, srqn);
	mlx4_table_put(dev, &srq_table->table, srqn);
	mlx4_bitmap_free(&srq_table->bitmap, srqn);
}

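/*
 * Multi-function counterpart of __mlx4_srq_free_icm(): a slave asks
 * the master to release the resource with a FREE_RES command.
 */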
static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, srqn);
		if (mlx4_cmd(dev, in_param, RES_SRQ, RES_OP_RESERVE_AND_MAP,
			     MLX4_CMD_FREE_RES,
			     MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed freeing srq:%d\n", srqn);
		return;
	}
	__mlx4_srq_free_icm(dev, srqn);
}

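/*
 * Allocate an SRQ: reserve an SRQ number and its ICM, insert the SRQ
 * into the radix tree used for event dispatch, build the SRQ context
 * (log size, WQE stride, XRC domain, CQN, MTT address, PD and doorbell
 * record address) in a command mailbox, and pass ownership to the
 * hardware with SW2HW_SRQ.  On success the SRQ holds one reference,
 * dropped in mlx4_srq_free().
 */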
int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
		   struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_srq_context *srq_context;
	u64 mtt_addr;
	int err;

	err = mlx4_srq_alloc_icm(dev, &srq->srqn);
	if (err)
		return err;

	spin_lock_irq(&srq_table->lock);
	err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
	spin_unlock_irq(&srq_table->lock);
	if (err)
		goto err_icm;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_radix;
	}

	srq_context = mailbox->buf;
	memset(srq_context, 0, sizeof(*srq_context));

	srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) |
						      srq->srqn);
	srq_context->logstride          = srq->wqe_shift - 4;
	srq_context->xrcd		= cpu_to_be16(xrcd);
	srq_context->pg_offset_cqn	= cpu_to_be32(cqn & 0xffffff);
	srq_context->log_page_size      = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	srq_context->mtt_base_addr_h    = mtt_addr >> 32;
	srq_context->mtt_base_addr_l    = cpu_to_be32(mtt_addr & 0xffffffff);
	srq_context->pd			= cpu_to_be32(pdn);
	srq_context->db_rec_addr        = cpu_to_be64(db_rec);

	err = mlx4_SW2HW_SRQ(dev, mailbox, srq->srqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		goto err_radix;

	atomic_set(&srq->refcount, 1);
	init_completion(&srq->free);

	return 0;

err_radix:
	spin_lock_irq(&srq_table->lock);
	radix_tree_delete(&srq_table->tree, srq->srqn);
	spin_unlock_irq(&srq_table->lock);

err_icm:
	mlx4_srq_free_icm(dev, srq->srqn);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_srq_alloc);

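/*
 * Tear down an SRQ: reclaim it from the hardware with HW2SW_SRQ,
 * remove it from the radix tree so no new events can find it, then
 * drop the initial reference and wait for any event handlers still
 * holding the SRQ before releasing its ICM.
 */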
void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	int err;

	err = mlx4_HW2SW_SRQ(dev, NULL, srq->srqn);
	if (err)
		mlx4_warn(dev, "HW2SW_SRQ failed (%d) for SRQN %06x\n", err, srq->srqn);

	spin_lock_irq(&srq_table->lock);
	radix_tree_delete(&srq_table->tree, srq->srqn);
	spin_unlock_irq(&srq_table->lock);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	mlx4_srq_free_icm(dev, srq->srqn);
}
EXPORT_SYMBOL_GPL(mlx4_srq_free);

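/*
 * Arm the SRQ limit: once the number of posted receive WQEs drops
 * below limit_watermark, the hardware generates an SRQ limit event.
 */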
int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark)
{
	return mlx4_ARM_SRQ(dev, srq->srqn, limit_watermark);
}
EXPORT_SYMBOL_GPL(mlx4_srq_arm);

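/*
 * Read back the SRQ context from the firmware and extract the current
 * limit watermark.
 */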
int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_srq_context *srq_context;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	srq_context = mailbox->buf;

	err = mlx4_QUERY_SRQ(dev, mailbox, srq->srqn);
	if (err)
		goto err_out;
	*limit_watermark = be16_to_cpu(srq_context->limit_watermark);

err_out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_srq_query);

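/*
 * Set up the driver-side SRQ state: the radix tree used for event
 * dispatch and, on non-slave functions, the bitmap of SRQ numbers
 * with the firmware-reserved ones excluded.
 */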
int mlx4_init_srq_table(struct mlx4_dev *dev)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	int err;

	spin_lock_init(&srq_table->lock);
	INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
	if (mlx4_is_slave(dev))
		return 0;

	err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
			       dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
	if (err)
		return err;

	return 0;
}

void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
{
	if (mlx4_is_slave(dev))
		return;
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
}

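/*
 * Look up an SRQ by number.  No reference is taken here, so the
 * caller must ensure the SRQ cannot be destroyed while the returned
 * pointer is in use.
 */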
struct mlx4_srq *mlx4_srq_lookup(struct mlx4_dev *dev, u32 srqn)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	struct mlx4_srq *srq;
	unsigned long flags;

	spin_lock_irqsave(&srq_table->lock, flags);
	srq = radix_tree_lookup(&srq_table->tree,
				srqn & (dev->caps.num_srqs - 1));
	spin_unlock_irqrestore(&srq_table->lock, flags);

	return srq;
}
EXPORT_SYMBOL_GPL(mlx4_srq_lookup);