/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/hardirq.h>
#include <linux/export.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4.h"
#include "icm.h"

#define MLX4_CQ_STATUS_OK		( 0 << 28)
#define MLX4_CQ_STATUS_OVERFLOW		( 9 << 28)
#define MLX4_CQ_STATUS_WRITE_FAIL	(10 << 28)
#define MLX4_CQ_FLAG_CC			( 1 << 18)
#define MLX4_CQ_FLAG_OI			( 1 << 17)
#define MLX4_CQ_STATE_ARMED		( 9 <<  8)
#define MLX4_CQ_STATE_ARMED_SOL		( 6 <<  8)
#define MLX4_EQ_STATE_FIRED		(10 <<  8)

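/*
 * Completion event dispatch: look the CQ up in the radix tree, bump its
 * arm sequence number and call the owner's completion handler.  Runs
 * from the EQ interrupt path.
 */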
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
{
	struct mlx4_cq *cq;

	cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
			       cqn & (dev->caps.num_cqs - 1));
	if (!cq) {
		mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
		return;
	}

	++cq->arm_sn;

	cq->comp(cq);
}

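/*
 * Asynchronous (error) event dispatch.  A reference is taken under the
 * CQ table lock so the CQ cannot be freed while its event handler runs;
 * the final reference drop completes cq->free for mlx4_cq_free().
 */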
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
	struct mlx4_cq *cq;

	spin_lock(&cq_table->lock);

	cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
	if (cq)
		atomic_inc(&cq->refcount);

	spin_unlock(&cq_table->lock);

	if (!cq) {
		mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
		return;
	}

	cq->event(cq, event_type);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
}

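/*
 * Firmware command wrappers: SW2HW_CQ hands a software-built CQ context
 * to the device, MODIFY_CQ updates selected fields of it, and HW2SW_CQ
 * returns ownership of the CQ to software (optionally reading the
 * context back when a mailbox is supplied).
 */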
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int cq_num)
{
	return mlx4_cmd(dev, mailbox->dma, cq_num, 0,
			MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}

static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int cq_num, u32 opmod)
{
	return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int cq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
			    cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

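/*
 * Update the event moderation fields (cq_max_count/cq_period) of an
 * existing CQ; opcode modifier 1 selects the moderation flavor of
 * MODIFY_CQ.
 */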
int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
		   u16 count, u16 period)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	memset(cq_context, 0, sizeof *cq_context);

	cq_context->cq_max_count = cpu_to_be16(count);
	cq_context->cq_period    = cpu_to_be16(period);

	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_modify);

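/*
 * Point the CQ at a new buffer: rewrite the size and MTT address fields
 * of the CQ context via MODIFY_CQ with opcode modifier 0.
 */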
int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
		   int entries, struct mlx4_mtt *mtt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	memset(cq_context, 0, sizeof *cq_context);

	cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
	cq_context->log_page_size   = mtt->page_shift - 12;
	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_resize);

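/*
 * Reserve a CQ number from the bitmap and make sure the matching
 * entries in the CQ context table and cmpt table are backed by ICM
 * memory.  This is the native/PF path; see mlx4_cq_alloc_icm() for the
 * multi-function wrapper.
 */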
int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	int err;

	*cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
	if (*cqn == -1)
		return -ENOMEM;

	err = mlx4_table_get(dev, &cq_table->table, *cqn);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn);
	if (err)
		goto err_put;
	return 0;

err_put:
	mlx4_table_put(dev, &cq_table->table, *cqn);

err_out:
	mlx4_bitmap_free(&cq_table->bitmap, *cqn);
	return err;
}

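/*
 * Multi-function aware CQ number allocation: in multi-function mode the
 * CQ is reserved through the wrapped ALLOC_RES command, otherwise it is
 * allocated directly from the local bitmap and ICM tables.
 */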
static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, 0, &out_param, RES_CQ,
				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			return err;

		*cqn = get_param_l(&out_param);
		return 0;
	}
	return __mlx4_cq_alloc_icm(dev, cqn);
}

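/* Drop the ICM table references and return the CQ number to the bitmap. */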
void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;

	mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
	mlx4_table_put(dev, &cq_table->table, cqn);
	mlx4_bitmap_free(&cq_table->bitmap, cqn);
}

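/*
 * Multi-function aware counterpart of __mlx4_cq_free_icm(): in
 * multi-function mode the CQ number is released through the wrapped
 * FREE_RES command, otherwise it is freed directly.
 */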
static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, cqn);
		err = mlx4_cmd(dev, in_param, RES_CQ, RES_OP_RESERVE_AND_MAP,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed freeing cq:%d\n", cqn);
	} else {
		__mlx4_cq_free_icm(dev, cqn);
	}
}

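/*
 * Allocate and activate a CQ: reserve a CQ number, insert the CQ into
 * the radix tree so event handlers can find it, build the CQ context
 * (size, UAR page, completion EQ, MTT address, doorbell record) and
 * pass it to the device with SW2HW_CQ.
 */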
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
		  unsigned vector, int collapsed)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool)
		return -EINVAL;

	cq->vector = vector;

	err = mlx4_cq_alloc_icm(dev, &cq->cqn);
	if (err)
		return err;

	spin_lock_irq(&cq_table->lock);
	err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
	spin_unlock_irq(&cq_table->lock);
	if (err)
		goto err_icm;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_radix;
	}

	cq_context = mailbox->buf;
	memset(cq_context, 0, sizeof *cq_context);

	cq_context->flags	    = cpu_to_be32(!!collapsed << 18);
	cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
	cq_context->comp_eqn	    = priv->eq_table.eq[vector].eqn;
	cq_context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
	cq_context->db_rec_addr     = cpu_to_be64(db_rec);

	err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		goto err_radix;

	cq->cons_index = 0;
	cq->arm_sn     = 1;
	cq->uar        = uar;
	atomic_set(&cq->refcount, 1);
	init_completion(&cq->free);

	return 0;

err_radix:
	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock_irq(&cq_table->lock);

err_icm:
	mlx4_cq_free_icm(dev, cq->cqn);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_alloc);

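/*
 * Tear down a CQ: return ownership to software with HW2SW_CQ, wait for
 * the completion vector's interrupt handler to finish, remove the CQ
 * from the radix tree, wait out any event handler still holding a
 * reference, and finally release the CQ number and its ICM entries.
 */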
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	int err;

	err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn);
	if (err)
		mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);

	synchronize_irq(priv->eq_table.eq[cq->vector].irq);

	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock_irq(&cq_table->lock);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
	wait_for_completion(&cq->free);

	mlx4_cq_free_icm(dev, cq->cqn);
}
EXPORT_SYMBOL_GPL(mlx4_cq_free);

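/*
 * Set up the per-device CQ table: the radix tree mapping CQ numbers to
 * CQs and, except on slaves (which obtain CQ numbers from the master),
 * the bitmap of CQ numbers with the reserved range excluded.
 */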
int mlx4_init_cq_table(struct mlx4_dev *dev)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
	int err;

	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
	if (mlx4_is_slave(dev))
		return 0;

	err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
			       dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
	if (err)
		return err;

	return 0;
}

void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
{
	if (mlx4_is_slave(dev))
		return;
	/* Nothing to do to clean up radix_tree */
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
}