1/*
2 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses.  You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 *     Redistribution and use in source and binary forms, with or
13 *     without modification, are permitted provided that the following
14 *     conditions are met:
15 *
16 *      - Redistributions of source code must retain the above
17 *        copyright notice, this list of conditions and the following
18 *        disclaimer.
19 *
20 *      - Redistributions in binary form must reproduce the above
21 *        copyright notice, this list of conditions and the following
22 *        disclaimer in the documentation and/or other materials
23 *        provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/export.h>
38#include <linux/pci.h>
39#include <linux/errno.h>
40
41#include <linux/mlx4/cmd.h>
42#include <linux/mlx4/device.h>
43#include <linux/semaphore.h>
44#include <rdma/ib_smi.h>
45
46#include <asm/io.h>
47
48#include "mlx4.h"
49#include "fw.h"
50
#define CMD_POLL_TOKEN 0xffff		/* token value used for polled (non-event) commands */
#define INBOX_MASK	0xffffffffffffff00ULL

/* comm channel version/interface revision — not referenced in this chunk;
 * presumably exchanged with the master at init time (verify against caller) */
#define CMD_CHAN_VER 1
#define CMD_CHAN_IF_REV 1
56
/* Firmware status codes returned in the HCR/VHCR status field. */
enum {
	/* command completed successfully: */
	CMD_STAT_OK		= 0x00,
	/* Internal error (such as a bus error) occurred while processing command: */
	CMD_STAT_INTERNAL_ERR	= 0x01,
	/* Operation/command not supported or opcode modifier not supported: */
	CMD_STAT_BAD_OP		= 0x02,
	/* Parameter not supported or parameter out of range: */
	CMD_STAT_BAD_PARAM	= 0x03,
	/* System not enabled or bad system state: */
	CMD_STAT_BAD_SYS_STATE	= 0x04,
	/* Attempt to access reserved or unallocated resource: */
	CMD_STAT_BAD_RESOURCE	= 0x05,
	/* Requested resource is currently executing a command, or is otherwise busy: */
	CMD_STAT_RESOURCE_BUSY	= 0x06,
	/* Required capability exceeds device limits: */
	CMD_STAT_EXCEED_LIM	= 0x08,
	/* Resource is not in the appropriate state or ownership: */
	CMD_STAT_BAD_RES_STATE	= 0x09,
	/* Index out of range: */
	CMD_STAT_BAD_INDEX	= 0x0a,
	/* FW image corrupted: */
	CMD_STAT_BAD_NVMEM	= 0x0b,
	/* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
	CMD_STAT_ICM_ERROR	= 0x0c,
	/* Attempt to modify a QP/EE which is not in the presumed state: */
	CMD_STAT_BAD_QP_STATE   = 0x10,
	/* Bad segment parameters (Address/Size): */
	CMD_STAT_BAD_SEG_PARAM	= 0x20,
	/* Memory Region has Memory Windows bound to: */
	CMD_STAT_REG_BOUND	= 0x21,
	/* HCA local attached memory not present: */
	CMD_STAT_LAM_NOT_PRE	= 0x22,
	/* Bad management packet (silently discarded): */
	CMD_STAT_BAD_PKT	= 0x30,
	/* More outstanding CQEs in CQ than new CQ size: */
	CMD_STAT_BAD_SIZE	= 0x40,
	/* Multi Function device support required: */
	CMD_STAT_MULTI_FUNC_REQ	= 0x50,
};
97
/*
 * Layout of the HCR (hardware command register) window: byte offsets of
 * the command fields, and bit positions within the final (7th) dword
 * that mlx4_cmd_post() writes to hand the command to firmware.
 */
enum {
	HCR_IN_PARAM_OFFSET	= 0x00,
	HCR_IN_MODIFIER_OFFSET	= 0x08,
	HCR_OUT_PARAM_OFFSET	= 0x0c,
	HCR_TOKEN_OFFSET	= 0x14,
	HCR_STATUS_OFFSET	= 0x18,

	HCR_OPMOD_SHIFT		= 12,	/* opcode modifier position in the opcode dword */
	HCR_T_BIT		= 21,	/* toggle bit, flipped on every post (see cmd->toggle) */
	HCR_E_BIT		= 22,	/* set to request event-driven completion */
	HCR_GO_BIT		= 23	/* set while firmware owns the HCR */
};
110
enum {
	/* max time mlx4_cmd_post() waits for the previous command's go bit
	 * to clear (event mode only) */
	GO_BIT_TIMEOUT_MSECS	= 10000
};
114
/* VLAN policy transition types (VST = switch-tagged, VGT = guest-tagged);
 * not referenced in this chunk — presumably consumed by VF VLAN admin code
 * elsewhere in the driver (verify against callers). */
enum mlx4_vlan_transition {
	MLX4_VLAN_TRANSITION_VST_VST = 0,
	MLX4_VLAN_TRANSITION_VST_VGT = 1,
	MLX4_VLAN_TRANSITION_VGT_VST = 2,
	MLX4_VLAN_TRANSITION_VGT_VGT = 3,
};
121
122
/* Per-outstanding-command state used in event (interrupt-driven) mode. */
struct mlx4_cmd_context {
	struct completion	done;		/* completed by mlx4_cmd_event() */
	int			result;		/* errno translated from fw_status */
	int			next;		/* free-list link: index into cmd->context */
	u64			out_param;	/* immediate output reported by the EQE */
	u16			token;		/* expected token; mismatch = stale completion */
	u8			fw_status;	/* raw firmware status byte */
};
131
132static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
133				    struct mlx4_vhcr_cmd *in_vhcr);
134
135static int mlx4_status_to_errno(u8 status)
136{
137	static const int trans_table[] = {
138		[CMD_STAT_INTERNAL_ERR]	  = -EIO,
139		[CMD_STAT_BAD_OP]	  = -EPERM,
140		[CMD_STAT_BAD_PARAM]	  = -EINVAL,
141		[CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
142		[CMD_STAT_BAD_RESOURCE]	  = -EBADF,
143		[CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
144		[CMD_STAT_EXCEED_LIM]	  = -ENOMEM,
145		[CMD_STAT_BAD_RES_STATE]  = -EBADF,
146		[CMD_STAT_BAD_INDEX]	  = -EBADF,
147		[CMD_STAT_BAD_NVMEM]	  = -EFAULT,
148		[CMD_STAT_ICM_ERROR]	  = -ENFILE,
149		[CMD_STAT_BAD_QP_STATE]   = -EINVAL,
150		[CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
151		[CMD_STAT_REG_BOUND]	  = -EBUSY,
152		[CMD_STAT_LAM_NOT_PRE]	  = -EAGAIN,
153		[CMD_STAT_BAD_PKT]	  = -EINVAL,
154		[CMD_STAT_BAD_SIZE]	  = -ENOMEM,
155		[CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
156	};
157
158	if (status >= ARRAY_SIZE(trans_table) ||
159	    (status != CMD_STAT_OK && trans_table[status] == 0))
160		return -EIO;
161
162	return trans_table[status];
163}
164
165static u8 mlx4_errno_to_status(int errno)
166{
167	switch (errno) {
168	case -EPERM:
169		return CMD_STAT_BAD_OP;
170	case -EINVAL:
171		return CMD_STAT_BAD_PARAM;
172	case -ENXIO:
173		return CMD_STAT_BAD_SYS_STATE;
174	case -EBUSY:
175		return CMD_STAT_RESOURCE_BUSY;
176	case -ENOMEM:
177		return CMD_STAT_EXCEED_LIM;
178	case -ENFILE:
179		return CMD_STAT_ICM_ERROR;
180	default:
181		return CMD_STAT_INTERNAL_ERR;
182	}
183}
184
/*
 * Return non-zero while our last comm-channel command is still pending:
 * pending means bit 31 of the (big-endian) slave_read word does not yet
 * match our local toggle copy.  mlx4_comm_cmd_post() flips the toggle on
 * every post; presumably the master echoes it back when done — confirm
 * against the master-side handler.
 */
static int comm_pending(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 status = readl(&priv->mfunc.comm->slave_read);

	/* swab32: the comm channel word is stored big-endian */
	return (swab32(status) >> 31) != priv->cmd.comm_toggle;
}
192
/*
 * Post one command on the slave->master communication channel: flip the
 * local toggle bit and write (param | cmd << 16 | toggle << 31) as a
 * big-endian dword to slave_write.  Callers serialize posts (poll_sem
 * or the event-mode context bookkeeping).
 */
static void mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 val;

	priv->cmd.comm_toggle ^= 1;
	val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
	__raw_writel((__force u32) cpu_to_be32(val),
		     &priv->mfunc.comm->slave_write);
	/* keep this MMIO write ordered w.r.t. writes from other CPUs */
	mmiowb();
}
204
/*
 * Post a comm-channel command and busy-wait (yielding via cond_resched)
 * until the master consumes it or @timeout (msecs) expires.
 *
 * Returns 0 on success, -EAGAIN if the channel was not idle on entry,
 * -ETIMEDOUT on timeout, or MLX4_DELAY_RESET_SLAVE when a RESET times
 * out (the one command allowed to stall, e.g. mid-FLR).
 */
static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
		       unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	unsigned long end;
	int err = 0;
	int ret_from_pending = 0;

	/* First, verify that the master reports correct status */
	/* NOTE(review): this check precedes down(&poll_sem), so it can race
	 * a concurrent poster — confirm callers serialize, or move it
	 * inside the semaphore. */
	if (comm_pending(dev)) {
		mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
			  priv->cmd.comm_toggle, cmd);
		return -EAGAIN;
	}

	/* Write command */
	down(&priv->cmd.poll_sem);
	mlx4_comm_cmd_post(dev, cmd, param);

	end = msecs_to_jiffies(timeout) + jiffies;
	while (comm_pending(dev) && time_before(jiffies, end))
		cond_resched();
	ret_from_pending = comm_pending(dev);
	if (ret_from_pending) {
		/* check if the slave is trying to boot in the middle of
		 * FLR process. The only non-zero result in the RESET command
		 * is MLX4_DELAY_RESET_SLAVE*/
		if ((MLX4_COMM_CMD_RESET == cmd)) {
			err = MLX4_DELAY_RESET_SLAVE;
		} else {
			mlx4_warn(dev, "Communication channel timed out\n");
			err = -ETIMEDOUT;
		}
	}

	up(&priv->cmd.poll_sem);
	return err;
}
243
/*
 * Post a comm-channel command in event mode: take a command context off
 * the free list, post, and sleep until mlx4_cmd_event() completes the
 * context or @timeout (msecs) expires.  Returns 0 on success, -EBUSY on
 * timeout, or the errno recorded by the completion handler.
 */
static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
			      u16 param, unsigned long timeout)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_context *context;
	unsigned long end;
	int err = 0;

	/* event_sem presumably counts free contexts (the BUG_ON below
	 * guards that invariant) — confirm against mlx4_cmd_use_events */
	down(&cmd->event_sem);

	spin_lock(&cmd->context_lock);
	BUG_ON(cmd->free_head < 0);
	context = &cmd->context[cmd->free_head];
	/* advance the token so a late completion of a previous, timed-out
	 * use of this slot is recognized as stale and dropped */
	context->token += cmd->token_mask + 1;
	cmd->free_head = context->next;
	spin_unlock(&cmd->context_lock);

	init_completion(&context->done);

	mlx4_comm_cmd_post(dev, op, param);

	if (!wait_for_completion_timeout(&context->done,
					 msecs_to_jiffies(timeout))) {
		mlx4_warn(dev, "communication channel command 0x%x timed out\n",
			  op);
		err = -EBUSY;
		goto out;
	}

	err = context->result;
	if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
			 op, context->fw_status);
		goto out;
	}

out:
	/* wait for comm channel ready
	 * this is necessary for prevention the race
	 * when switching between event to polling mode
	 */
	end = msecs_to_jiffies(timeout) + jiffies;
	while (comm_pending(dev) && time_before(jiffies, end))
		cond_resched();

	/* return the context to the free list */
	spin_lock(&cmd->context_lock);
	context->next = cmd->free_head;
	cmd->free_head = context - cmd->context;
	spin_unlock(&cmd->context_lock);

	up(&cmd->event_sem);
	return err;
}
297
298int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
299		  unsigned long timeout)
300{
301	if (mlx4_priv(dev)->cmd.use_events)
302		return mlx4_comm_cmd_wait(dev, cmd, param, timeout);
303	return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
304}
305
/*
 * Return non-zero while firmware still owns the HCR: either the go bit
 * is set, or the toggle bit has not yet flipped to the value we expect
 * next.  A dead PCI channel returns -EIO, which callers also treat as
 * "pending" (non-zero).
 */
static int cmd_pending(struct mlx4_dev *dev)
{
	u32 status;

	if (pci_channel_offline(dev->pdev))
		return -EIO;

	status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);

	/* swab32: the HCR status dword is big-endian */
	return (status & swab32(1 << HCR_GO_BIT)) ||
		(mlx4_priv(dev)->cmd.toggle ==
		 !!(status & swab32(1 << HCR_T_BIT)));
}
319
/*
 * Write one command into the HCR and set the go bit, handing it to FW.
 * In event mode we wait up to GO_BIT_TIMEOUT_MSECS for the previous
 * command to clear; in polling mode (end == jiffies) a still-pending
 * HCR fails immediately with -EAGAIN.  Completion is reported
 * separately (polling the HCR or via a completion EQE).
 *
 * Returns 0 once posted, -EAGAIN if the HCR never freed up, -EIO if
 * the PCI channel is offline.
 */
static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
			 u32 in_modifier, u8 op_modifier, u16 op, u16 token,
			 int event)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	u32 __iomem *hcr = cmd->hcr;
	int ret = -EAGAIN;
	unsigned long end;

	mutex_lock(&cmd->hcr_mutex);

	if (pci_channel_offline(dev->pdev)) {
		/*
		 * Device is going through error recovery
		 * and cannot accept commands.
		 */
		ret = -EIO;
		goto out;
	}

	end = jiffies;
	if (event)
		end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);

	while (cmd_pending(dev)) {
		if (pci_channel_offline(dev->pdev)) {
			/*
			 * Device is going through error recovery
			 * and cannot accept commands.
			 */
			ret = -EIO;
			goto out;
		}

		if (time_after_eq(jiffies, end)) {
			mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
			goto out;
		}
		cond_resched();
	}

	/*
	 * We use writel (instead of something like memcpy_toio)
	 * because writes of less than 32 bits to the HCR don't work
	 * (and some architectures such as ia64 implement memcpy_toio
	 * in terms of writeb).
	 */
	__raw_writel((__force u32) cpu_to_be32(in_param >> 32),		  hcr + 0);
	__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  hcr + 1);
	__raw_writel((__force u32) cpu_to_be32(in_modifier),		  hcr + 2);
	__raw_writel((__force u32) cpu_to_be32(out_param >> 32),	  hcr + 3);
	__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
	__raw_writel((__force u32) cpu_to_be32(token << 16),		  hcr + 5);

	/* __raw_writel may not order writes. */
	wmb();

	/* the go bit must be written last, after all command fields */
	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)		|
					       (cmd->toggle << HCR_T_BIT)	|
					       (event ? (1 << HCR_E_BIT) : 0)	|
					       (op_modifier << HCR_OPMOD_SHIFT) |
					       op), hcr + 6);

	/*
	 * Make sure that our HCR writes don't get mixed in with
	 * writes from another CPU starting a FW command.
	 */
	mmiowb();

	cmd->toggle = cmd->toggle ^ 1;

	ret = 0;

out:
	mutex_unlock(&cmd->hcr_mutex);
	return ret;
}
397
398static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
399			  int out_is_imm, u32 in_modifier, u8 op_modifier,
400			  u16 op, unsigned long timeout)
401{
402	struct mlx4_priv *priv = mlx4_priv(dev);
403	struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
404	int ret;
405
406	mutex_lock(&priv->cmd.slave_cmd_mutex);
407
408	vhcr->in_param = cpu_to_be64(in_param);
409	vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
410	vhcr->in_modifier = cpu_to_be32(in_modifier);
411	vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
412	vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
413	vhcr->status = 0;
414	vhcr->flags = !!(priv->cmd.use_events) << 6;
415
416	if (mlx4_is_master(dev)) {
417		ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
418		if (!ret) {
419			if (out_is_imm) {
420				if (out_param)
421					*out_param =
422						be64_to_cpu(vhcr->out_param);
423				else {
424					mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
425						 op);
426					vhcr->status = CMD_STAT_BAD_PARAM;
427				}
428			}
429			ret = mlx4_status_to_errno(vhcr->status);
430		}
431	} else {
432		ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0,
433				    MLX4_COMM_TIME + timeout);
434		if (!ret) {
435			if (out_is_imm) {
436				if (out_param)
437					*out_param =
438						be64_to_cpu(vhcr->out_param);
439				else {
440					mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
441						 op);
442					vhcr->status = CMD_STAT_BAD_PARAM;
443				}
444			}
445			ret = mlx4_status_to_errno(vhcr->status);
446		} else
447			mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n",
448				 op);
449	}
450
451	mutex_unlock(&priv->cmd.slave_cmd_mutex);
452	return ret;
453}
454
/*
 * Execute a native FW command in polling mode: post it with
 * CMD_POLL_TOKEN, spin (yielding) until the go bit clears or @timeout
 * (msecs) expires, then read status (and the immediate out_param when
 * out_is_imm) straight from the HCR.  Serialized by poll_sem.
 */
static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			 int out_is_imm, u32 in_modifier, u8 op_modifier,
			 u16 op, unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	void __iomem *hcr = priv->cmd.hcr;
	int err = 0;
	unsigned long end;
	u32 stat;

	down(&priv->cmd.poll_sem);

	if (pci_channel_offline(dev->pdev)) {
		/*
		 * Device is going through error recovery
		 * and cannot accept commands.
		 */
		err = -EIO;
		goto out;
	}

	if (out_is_imm && !out_param) {
		mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
			 op);
		err = -EINVAL;
		goto out;
	}

	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
			    in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
	if (err)
		goto out;

	end = msecs_to_jiffies(timeout) + jiffies;
	while (cmd_pending(dev) && time_before(jiffies, end)) {
		if (pci_channel_offline(dev->pdev)) {
			/*
			 * Device is going through error recovery
			 * and cannot accept commands.
			 */
			err = -EIO;
			goto out;
		}

		cond_resched();
	}

	if (cmd_pending(dev)) {
		mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
			  op);
		err = -ETIMEDOUT;
		goto out;
	}

	/* immediate output lives in the HCR out_param field, big-endian */
	if (out_is_imm)
		*out_param =
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
	/* FW status is the top byte of the status dword */
	stat = be32_to_cpu((__force __be32)
			   __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
	err = mlx4_status_to_errno(stat);
	if (err)
		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
			 op, stat);

out:
	up(&priv->cmd.poll_sem);
	return err;
}
526
527void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
528{
529	struct mlx4_priv *priv = mlx4_priv(dev);
530	struct mlx4_cmd_context *context =
531		&priv->cmd.context[token & priv->cmd.token_mask];
532
533	/* previously timed out command completing at long last */
534	if (token != context->token)
535		return;
536
537	context->fw_status = status;
538	context->result    = mlx4_status_to_errno(status);
539	context->out_param = out_param;
540
541	complete(&context->done);
542}
543
/*
 * Execute a native FW command in event mode: allocate a command context
 * off the free list, post with its token, and sleep until
 * mlx4_cmd_event() completes the context or @timeout (msecs) expires.
 * Returns 0 on success, -EBUSY on timeout, -EINVAL for a missing
 * immediate-output buffer, or the errno recorded by the handler.
 */
static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			 int out_is_imm, u32 in_modifier, u8 op_modifier,
			 u16 op, unsigned long timeout)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_context *context;
	int err = 0;

	down(&cmd->event_sem);

	spin_lock(&cmd->context_lock);
	BUG_ON(cmd->free_head < 0);
	context = &cmd->context[cmd->free_head];
	/* advance the token so a late completion of a previous, timed-out
	 * use of this slot is recognized as stale and dropped */
	context->token += cmd->token_mask + 1;
	cmd->free_head = context->next;
	spin_unlock(&cmd->context_lock);

	if (out_is_imm && !out_param) {
		mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
			 op);
		err = -EINVAL;
		goto out;
	}

	init_completion(&context->done);

	mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
		      in_modifier, op_modifier, op, context->token, 1);

	if (!wait_for_completion_timeout(&context->done,
					 msecs_to_jiffies(timeout))) {
		mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
			  op);
		err = -EBUSY;
		goto out;
	}

	err = context->result;
	if (err) {
		/* Since we do not want to have this error message always
		 * displayed at driver start when there are ConnectX2 HCAs
		 * on the host, we deprecate the error message for this
		 * specific command/input_mod/opcode_mod/fw-status to be debug.
		 */
		if (op == MLX4_CMD_SET_PORT && in_modifier == 1 &&
		    op_modifier == 0 && context->fw_status == CMD_STAT_BAD_SIZE)
			mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
				 op, context->fw_status);
		else
			mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
				 op, context->fw_status);
		goto out;
	}

	if (out_is_imm)
		*out_param = context->out_param;

out:
	/* return the context to the free list */
	spin_lock(&cmd->context_lock);
	context->next = cmd->free_head;
	cmd->free_head = context - cmd->context;
	spin_unlock(&cmd->context_lock);

	up(&cmd->event_sem);
	return err;
}
610
611int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
612	       int out_is_imm, u32 in_modifier, u8 op_modifier,
613	       u16 op, unsigned long timeout, int native)
614{
615	if (pci_channel_offline(dev->pdev))
616		return -EIO;
617
618	if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
619		if (mlx4_priv(dev)->cmd.use_events)
620			return mlx4_cmd_wait(dev, in_param, out_param,
621					     out_is_imm, in_modifier,
622					     op_modifier, op, timeout);
623		else
624			return mlx4_cmd_poll(dev, in_param, out_param,
625					     out_is_imm, in_modifier,
626					     op_modifier, op, timeout);
627	}
628	return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
629			      in_modifier, op_modifier, op, timeout);
630}
631EXPORT_SYMBOL_GPL(__mlx4_cmd);
632
633
/* Issue the parameterless ARM_COMM_CHANNEL FW command (native mode). */
static int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
639
/*
 * DMA @size bytes between master memory and a slave's memory via the
 * ACCESS_MEM FW command.  Direction is selected by @is_read (read =
 * slave -> master).
 *
 * Constraints enforced by the validity check below: both addresses must
 * be 4KB-aligned, @slave must fit in 7 bits, and @size must have its
 * low 8 bits clear (multiple of 256).
 */
static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
			   int slave, u64 slave_addr,
			   int size, int is_read)
{
	u64 in_param;
	u64 out_param;

	if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
	    (slave & ~0x7f) | (size & 0xff)) {
		mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
			 slave_addr, master_addr, slave, size);
		return -EINVAL;
	}

	/* function id is packed into the low byte the address mask leaves free */
	if (is_read) {
		in_param = (u64) slave | slave_addr;
		out_param = (u64) dev->caps.function | master_addr;
	} else {
		in_param = (u64) dev->caps.function | master_addr;
		out_param = (u64) slave | slave_addr;
	}

	return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
			    MLX4_CMD_ACCESS_MEM,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
666
/*
 * Fetch one 32-entry P_Key block starting at @index (must be a multiple
 * of 32) for @port via a MAD_IFC SubnGet, converting the results to
 * host order into @pkey.  @inbox is expected to already hold a prepared
 * P_Key-table SMP; only attr_mod is updated here.
 */
static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox)
{
	struct ib_smp *in_mad = (struct ib_smp *)(inbox->buf);
	struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf);
	int err;
	int i;

	if (index & 0x1f)
		return -EINVAL;

	/* attr_mod selects the 32-entry block */
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (err)
		return err;

	for (i = 0; i < 32; ++i)
		pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]);

	return err;
}
692
693static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table,
694			       struct mlx4_cmd_mailbox *inbox,
695			       struct mlx4_cmd_mailbox *outbox)
696{
697	int i;
698	int err;
699
700	for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) {
701		err = query_pkey_block(dev, port, i, table + i, inbox, outbox);
702		if (err)
703			return err;
704	}
705
706	return 0;
707}
708#define PORT_CAPABILITY_LOCATION_IN_SMP 20
709#define PORT_STATE_OFFSET 32
710
711static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf)
712{
713	if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP)
714		return IB_PORT_ACTIVE;
715	else
716		return IB_PORT_DOWN;
717}
718
719static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
720				struct mlx4_vhcr *vhcr,
721				struct mlx4_cmd_mailbox *inbox,
722				struct mlx4_cmd_mailbox *outbox,
723				struct mlx4_cmd_info *cmd)
724{
725	struct ib_smp *smp = inbox->buf;
726	u32 index;
727	u8 port;
728	u8 opcode_modifier;
729	u16 *table;
730	int err;
731	int vidx, pidx;
732	int network_view;
733	struct mlx4_priv *priv = mlx4_priv(dev);
734	struct ib_smp *outsmp = outbox->buf;
735	__be16 *outtab = (__be16 *)(outsmp->data);
736	__be32 slave_cap_mask;
737	__be64 slave_node_guid;
738
739	port = vhcr->in_modifier;
740
741	/* network-view bit is for driver use only, and should not be passed to FW */
742	opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */
743	network_view = !!(vhcr->op_modifier & 0x8);
744
745	if (smp->base_version == 1 &&
746	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
747	    smp->class_version == 1) {
748		/* host view is paravirtualized */
749		if (!network_view && smp->method == IB_MGMT_METHOD_GET) {
750			if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) {
751				index = be32_to_cpu(smp->attr_mod);
752				if (port < 1 || port > dev->caps.num_ports)
753					return -EINVAL;
754				table = kcalloc(dev->caps.pkey_table_len[port], sizeof *table, GFP_KERNEL);
755				if (!table)
756					return -ENOMEM;
757				/* need to get the full pkey table because the paravirtualized
758				 * pkeys may be scattered among several pkey blocks.
759				 */
760				err = get_full_pkey_table(dev, port, table, inbox, outbox);
761				if (!err) {
762					for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) {
763						pidx = priv->virt2phys_pkey[slave][port - 1][vidx];
764						outtab[vidx % 32] = cpu_to_be16(table[pidx]);
765					}
766				}
767				kfree(table);
768				return err;
769			}
770			if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
771				/*get the slave specific caps:*/
772				/*do the command */
773				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
774					    vhcr->in_modifier, opcode_modifier,
775					    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
776				/* modify the response for slaves */
777				if (!err && slave != mlx4_master_func_num(dev)) {
778					u8 *state = outsmp->data + PORT_STATE_OFFSET;
779
780					*state = (*state & 0xf0) | vf_port_state(dev, port, slave);
781					slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
782					memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4);
783				}
784				return err;
785			}
786			if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) {
787				/* compute slave's gid block */
788				smp->attr_mod = cpu_to_be32(slave / 8);
789				/* execute cmd */
790				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
791					     vhcr->in_modifier, opcode_modifier,
792					     vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
793				if (!err) {
794					/* if needed, move slave gid to index 0 */
795					if (slave % 8)
796						memcpy(outsmp->data,
797						       outsmp->data + (slave % 8) * 8, 8);
798					/* delete all other gids */
799					memset(outsmp->data + 8, 0, 56);
800				}
801				return err;
802			}
803			if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
804				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
805					     vhcr->in_modifier, opcode_modifier,
806					     vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
807				if (!err) {
808					slave_node_guid =  mlx4_get_slave_node_guid(dev, slave);
809					memcpy(outsmp->data + 12, &slave_node_guid, 8);
810				}
811				return err;
812			}
813		}
814	}
815
816	/* Non-privileged VFs are only allowed "host" view LID-routed 'Get' MADs.
817	 * These are the MADs used by ib verbs (such as ib_query_gids).
818	 */
819	if (slave != mlx4_master_func_num(dev) &&
820	    !mlx4_vf_smi_enabled(dev, slave, port)) {
821		if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
822		      smp->method == IB_MGMT_METHOD_GET) || network_view) {
823			mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n",
824				 slave, smp->method, smp->mgmt_class,
825				 network_view ? "Network" : "Host",
826				 be16_to_cpu(smp->attr_id));
827			return -EPERM;
828		}
829	}
830
831	return mlx4_cmd_box(dev, inbox->dma, outbox->dma,
832				    vhcr->in_modifier, opcode_modifier,
833				    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
834}
835
/* Wrapper for commands slaves are never allowed to execute. */
static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave,
		     struct mlx4_vhcr *vhcr,
		     struct mlx4_cmd_mailbox *inbox,
		     struct mlx4_cmd_mailbox *outbox,
		     struct mlx4_cmd_info *cmd)
{
	return -EPERM;
}
844
845int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
846		     struct mlx4_vhcr *vhcr,
847		     struct mlx4_cmd_mailbox *inbox,
848		     struct mlx4_cmd_mailbox *outbox,
849		     struct mlx4_cmd_info *cmd)
850{
851	u64 in_param;
852	u64 out_param;
853	int err;
854
855	in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
856	out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
857	if (cmd->encode_slave_id) {
858		in_param &= 0xffffffffffffff00ll;
859		in_param |= slave;
860	}
861
862	err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
863			 vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
864			 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
865
866	if (cmd->out_is_imm)
867		vhcr->out_param = out_param;
868
869	return err;
870}
871
872static struct mlx4_cmd_info cmd_info[] = {
873	{
874		.opcode = MLX4_CMD_QUERY_FW,
875		.has_inbox = false,
876		.has_outbox = true,
877		.out_is_imm = false,
878		.encode_slave_id = false,
879		.verify = NULL,
880		.wrapper = mlx4_QUERY_FW_wrapper
881	},
882	{
883		.opcode = MLX4_CMD_QUERY_HCA,
884		.has_inbox = false,
885		.has_outbox = true,
886		.out_is_imm = false,
887		.encode_slave_id = false,
888		.verify = NULL,
889		.wrapper = NULL
890	},
891	{
892		.opcode = MLX4_CMD_QUERY_DEV_CAP,
893		.has_inbox = false,
894		.has_outbox = true,
895		.out_is_imm = false,
896		.encode_slave_id = false,
897		.verify = NULL,
898		.wrapper = mlx4_QUERY_DEV_CAP_wrapper
899	},
900	{
901		.opcode = MLX4_CMD_QUERY_FUNC_CAP,
902		.has_inbox = false,
903		.has_outbox = true,
904		.out_is_imm = false,
905		.encode_slave_id = false,
906		.verify = NULL,
907		.wrapper = mlx4_QUERY_FUNC_CAP_wrapper
908	},
909	{
910		.opcode = MLX4_CMD_QUERY_ADAPTER,
911		.has_inbox = false,
912		.has_outbox = true,
913		.out_is_imm = false,
914		.encode_slave_id = false,
915		.verify = NULL,
916		.wrapper = NULL
917	},
918	{
919		.opcode = MLX4_CMD_INIT_PORT,
920		.has_inbox = false,
921		.has_outbox = false,
922		.out_is_imm = false,
923		.encode_slave_id = false,
924		.verify = NULL,
925		.wrapper = mlx4_INIT_PORT_wrapper
926	},
927	{
928		.opcode = MLX4_CMD_CLOSE_PORT,
929		.has_inbox = false,
930		.has_outbox = false,
931		.out_is_imm  = false,
932		.encode_slave_id = false,
933		.verify = NULL,
934		.wrapper = mlx4_CLOSE_PORT_wrapper
935	},
936	{
937		.opcode = MLX4_CMD_QUERY_PORT,
938		.has_inbox = false,
939		.has_outbox = true,
940		.out_is_imm = false,
941		.encode_slave_id = false,
942		.verify = NULL,
943		.wrapper = mlx4_QUERY_PORT_wrapper
944	},
945	{
946		.opcode = MLX4_CMD_SET_PORT,
947		.has_inbox = true,
948		.has_outbox = false,
949		.out_is_imm = false,
950		.encode_slave_id = false,
951		.verify = NULL,
952		.wrapper = mlx4_SET_PORT_wrapper
953	},
954	{
955		.opcode = MLX4_CMD_MAP_EQ,
956		.has_inbox = false,
957		.has_outbox = false,
958		.out_is_imm = false,
959		.encode_slave_id = false,
960		.verify = NULL,
961		.wrapper = mlx4_MAP_EQ_wrapper
962	},
963	{
964		.opcode = MLX4_CMD_SW2HW_EQ,
965		.has_inbox = true,
966		.has_outbox = false,
967		.out_is_imm = false,
968		.encode_slave_id = true,
969		.verify = NULL,
970		.wrapper = mlx4_SW2HW_EQ_wrapper
971	},
972	{
973		.opcode = MLX4_CMD_HW_HEALTH_CHECK,
974		.has_inbox = false,
975		.has_outbox = false,
976		.out_is_imm = false,
977		.encode_slave_id = false,
978		.verify = NULL,
979		.wrapper = NULL
980	},
981	{
982		.opcode = MLX4_CMD_NOP,
983		.has_inbox = false,
984		.has_outbox = false,
985		.out_is_imm = false,
986		.encode_slave_id = false,
987		.verify = NULL,
988		.wrapper = NULL
989	},
990	{
991		.opcode = MLX4_CMD_CONFIG_DEV,
992		.has_inbox = false,
993		.has_outbox = false,
994		.out_is_imm = false,
995		.encode_slave_id = false,
996		.verify = NULL,
997		.wrapper = mlx4_CMD_EPERM_wrapper
998	},
999	{
1000		.opcode = MLX4_CMD_ALLOC_RES,
1001		.has_inbox = false,
1002		.has_outbox = false,
1003		.out_is_imm = true,
1004		.encode_slave_id = false,
1005		.verify = NULL,
1006		.wrapper = mlx4_ALLOC_RES_wrapper
1007	},
1008	{
1009		.opcode = MLX4_CMD_FREE_RES,
1010		.has_inbox = false,
1011		.has_outbox = false,
1012		.out_is_imm = false,
1013		.encode_slave_id = false,
1014		.verify = NULL,
1015		.wrapper = mlx4_FREE_RES_wrapper
1016	},
1017	{
1018		.opcode = MLX4_CMD_SW2HW_MPT,
1019		.has_inbox = true,
1020		.has_outbox = false,
1021		.out_is_imm = false,
1022		.encode_slave_id = true,
1023		.verify = NULL,
1024		.wrapper = mlx4_SW2HW_MPT_wrapper
1025	},
1026	{
1027		.opcode = MLX4_CMD_QUERY_MPT,
1028		.has_inbox = false,
1029		.has_outbox = true,
1030		.out_is_imm = false,
1031		.encode_slave_id = false,
1032		.verify = NULL,
1033		.wrapper = mlx4_QUERY_MPT_wrapper
1034	},
1035	{
1036		.opcode = MLX4_CMD_HW2SW_MPT,
1037		.has_inbox = false,
1038		.has_outbox = false,
1039		.out_is_imm = false,
1040		.encode_slave_id = false,
1041		.verify = NULL,
1042		.wrapper = mlx4_HW2SW_MPT_wrapper
1043	},
1044	{
1045		.opcode = MLX4_CMD_READ_MTT,
1046		.has_inbox = false,
1047		.has_outbox = true,
1048		.out_is_imm = false,
1049		.encode_slave_id = false,
1050		.verify = NULL,
1051		.wrapper = NULL
1052	},
1053	{
1054		.opcode = MLX4_CMD_WRITE_MTT,
1055		.has_inbox = true,
1056		.has_outbox = false,
1057		.out_is_imm = false,
1058		.encode_slave_id = false,
1059		.verify = NULL,
1060		.wrapper = mlx4_WRITE_MTT_wrapper
1061	},
1062	{
1063		.opcode = MLX4_CMD_SYNC_TPT,
1064		.has_inbox = true,
1065		.has_outbox = false,
1066		.out_is_imm = false,
1067		.encode_slave_id = false,
1068		.verify = NULL,
1069		.wrapper = NULL
1070	},
1071	{
1072		.opcode = MLX4_CMD_HW2SW_EQ,
1073		.has_inbox = false,
1074		.has_outbox = true,
1075		.out_is_imm = false,
1076		.encode_slave_id = true,
1077		.verify = NULL,
1078		.wrapper = mlx4_HW2SW_EQ_wrapper
1079	},
1080	{
1081		.opcode = MLX4_CMD_QUERY_EQ,
1082		.has_inbox = false,
1083		.has_outbox = true,
1084		.out_is_imm = false,
1085		.encode_slave_id = true,
1086		.verify = NULL,
1087		.wrapper = mlx4_QUERY_EQ_wrapper
1088	},
1089	{
1090		.opcode = MLX4_CMD_SW2HW_CQ,
1091		.has_inbox = true,
1092		.has_outbox = false,
1093		.out_is_imm = false,
1094		.encode_slave_id = true,
1095		.verify = NULL,
1096		.wrapper = mlx4_SW2HW_CQ_wrapper
1097	},
1098	{
1099		.opcode = MLX4_CMD_HW2SW_CQ,
1100		.has_inbox = false,
1101		.has_outbox = false,
1102		.out_is_imm = false,
1103		.encode_slave_id = false,
1104		.verify = NULL,
1105		.wrapper = mlx4_HW2SW_CQ_wrapper
1106	},
1107	{
1108		.opcode = MLX4_CMD_QUERY_CQ,
1109		.has_inbox = false,
1110		.has_outbox = true,
1111		.out_is_imm = false,
1112		.encode_slave_id = false,
1113		.verify = NULL,
1114		.wrapper = mlx4_QUERY_CQ_wrapper
1115	},
1116	{
1117		.opcode = MLX4_CMD_MODIFY_CQ,
1118		.has_inbox = true,
1119		.has_outbox = false,
1120		.out_is_imm = true,
1121		.encode_slave_id = false,
1122		.verify = NULL,
1123		.wrapper = mlx4_MODIFY_CQ_wrapper
1124	},
1125	{
1126		.opcode = MLX4_CMD_SW2HW_SRQ,
1127		.has_inbox = true,
1128		.has_outbox = false,
1129		.out_is_imm = false,
1130		.encode_slave_id = true,
1131		.verify = NULL,
1132		.wrapper = mlx4_SW2HW_SRQ_wrapper
1133	},
1134	{
1135		.opcode = MLX4_CMD_HW2SW_SRQ,
1136		.has_inbox = false,
1137		.has_outbox = false,
1138		.out_is_imm = false,
1139		.encode_slave_id = false,
1140		.verify = NULL,
1141		.wrapper = mlx4_HW2SW_SRQ_wrapper
1142	},
1143	{
1144		.opcode = MLX4_CMD_QUERY_SRQ,
1145		.has_inbox = false,
1146		.has_outbox = true,
1147		.out_is_imm = false,
1148		.encode_slave_id = false,
1149		.verify = NULL,
1150		.wrapper = mlx4_QUERY_SRQ_wrapper
1151	},
1152	{
1153		.opcode = MLX4_CMD_ARM_SRQ,
1154		.has_inbox = false,
1155		.has_outbox = false,
1156		.out_is_imm = false,
1157		.encode_slave_id = false,
1158		.verify = NULL,
1159		.wrapper = mlx4_ARM_SRQ_wrapper
1160	},
1161	{
1162		.opcode = MLX4_CMD_RST2INIT_QP,
1163		.has_inbox = true,
1164		.has_outbox = false,
1165		.out_is_imm = false,
1166		.encode_slave_id = true,
1167		.verify = NULL,
1168		.wrapper = mlx4_RST2INIT_QP_wrapper
1169	},
1170	{
1171		.opcode = MLX4_CMD_INIT2INIT_QP,
1172		.has_inbox = true,
1173		.has_outbox = false,
1174		.out_is_imm = false,
1175		.encode_slave_id = false,
1176		.verify = NULL,
1177		.wrapper = mlx4_INIT2INIT_QP_wrapper
1178	},
1179	{
1180		.opcode = MLX4_CMD_INIT2RTR_QP,
1181		.has_inbox = true,
1182		.has_outbox = false,
1183		.out_is_imm = false,
1184		.encode_slave_id = false,
1185		.verify = NULL,
1186		.wrapper = mlx4_INIT2RTR_QP_wrapper
1187	},
1188	{
1189		.opcode = MLX4_CMD_RTR2RTS_QP,
1190		.has_inbox = true,
1191		.has_outbox = false,
1192		.out_is_imm = false,
1193		.encode_slave_id = false,
1194		.verify = NULL,
1195		.wrapper = mlx4_RTR2RTS_QP_wrapper
1196	},
1197	{
1198		.opcode = MLX4_CMD_RTS2RTS_QP,
1199		.has_inbox = true,
1200		.has_outbox = false,
1201		.out_is_imm = false,
1202		.encode_slave_id = false,
1203		.verify = NULL,
1204		.wrapper = mlx4_RTS2RTS_QP_wrapper
1205	},
1206	{
1207		.opcode = MLX4_CMD_SQERR2RTS_QP,
1208		.has_inbox = true,
1209		.has_outbox = false,
1210		.out_is_imm = false,
1211		.encode_slave_id = false,
1212		.verify = NULL,
1213		.wrapper = mlx4_SQERR2RTS_QP_wrapper
1214	},
1215	{
1216		.opcode = MLX4_CMD_2ERR_QP,
1217		.has_inbox = false,
1218		.has_outbox = false,
1219		.out_is_imm = false,
1220		.encode_slave_id = false,
1221		.verify = NULL,
1222		.wrapper = mlx4_GEN_QP_wrapper
1223	},
1224	{
1225		.opcode = MLX4_CMD_RTS2SQD_QP,
1226		.has_inbox = false,
1227		.has_outbox = false,
1228		.out_is_imm = false,
1229		.encode_slave_id = false,
1230		.verify = NULL,
1231		.wrapper = mlx4_GEN_QP_wrapper
1232	},
1233	{
1234		.opcode = MLX4_CMD_SQD2SQD_QP,
1235		.has_inbox = true,
1236		.has_outbox = false,
1237		.out_is_imm = false,
1238		.encode_slave_id = false,
1239		.verify = NULL,
1240		.wrapper = mlx4_SQD2SQD_QP_wrapper
1241	},
1242	{
1243		.opcode = MLX4_CMD_SQD2RTS_QP,
1244		.has_inbox = true,
1245		.has_outbox = false,
1246		.out_is_imm = false,
1247		.encode_slave_id = false,
1248		.verify = NULL,
1249		.wrapper = mlx4_SQD2RTS_QP_wrapper
1250	},
1251	{
1252		.opcode = MLX4_CMD_2RST_QP,
1253		.has_inbox = false,
1254		.has_outbox = false,
1255		.out_is_imm = false,
1256		.encode_slave_id = false,
1257		.verify = NULL,
1258		.wrapper = mlx4_2RST_QP_wrapper
1259	},
1260	{
1261		.opcode = MLX4_CMD_QUERY_QP,
1262		.has_inbox = false,
1263		.has_outbox = true,
1264		.out_is_imm = false,
1265		.encode_slave_id = false,
1266		.verify = NULL,
1267		.wrapper = mlx4_GEN_QP_wrapper
1268	},
1269	{
1270		.opcode = MLX4_CMD_SUSPEND_QP,
1271		.has_inbox = false,
1272		.has_outbox = false,
1273		.out_is_imm = false,
1274		.encode_slave_id = false,
1275		.verify = NULL,
1276		.wrapper = mlx4_GEN_QP_wrapper
1277	},
1278	{
1279		.opcode = MLX4_CMD_UNSUSPEND_QP,
1280		.has_inbox = false,
1281		.has_outbox = false,
1282		.out_is_imm = false,
1283		.encode_slave_id = false,
1284		.verify = NULL,
1285		.wrapper = mlx4_GEN_QP_wrapper
1286	},
1287	{
1288		.opcode = MLX4_CMD_UPDATE_QP,
1289		.has_inbox = true,
1290		.has_outbox = false,
1291		.out_is_imm = false,
1292		.encode_slave_id = false,
1293		.verify = NULL,
1294		.wrapper = mlx4_UPDATE_QP_wrapper
1295	},
1296	{
1297		.opcode = MLX4_CMD_GET_OP_REQ,
1298		.has_inbox = false,
1299		.has_outbox = false,
1300		.out_is_imm = false,
1301		.encode_slave_id = false,
1302		.verify = NULL,
1303		.wrapper = mlx4_CMD_EPERM_wrapper,
1304	},
1305	{
1306		.opcode = MLX4_CMD_CONF_SPECIAL_QP,
1307		.has_inbox = false,
1308		.has_outbox = false,
1309		.out_is_imm = false,
1310		.encode_slave_id = false,
1311		.verify = NULL, /* XXX verify: only demux can do this */
1312		.wrapper = NULL
1313	},
1314	{
1315		.opcode = MLX4_CMD_MAD_IFC,
1316		.has_inbox = true,
1317		.has_outbox = true,
1318		.out_is_imm = false,
1319		.encode_slave_id = false,
1320		.verify = NULL,
1321		.wrapper = mlx4_MAD_IFC_wrapper
1322	},
1323	{
1324		.opcode = MLX4_CMD_MAD_DEMUX,
1325		.has_inbox = false,
1326		.has_outbox = false,
1327		.out_is_imm = false,
1328		.encode_slave_id = false,
1329		.verify = NULL,
1330		.wrapper = mlx4_CMD_EPERM_wrapper
1331	},
1332	{
1333		.opcode = MLX4_CMD_QUERY_IF_STAT,
1334		.has_inbox = false,
1335		.has_outbox = true,
1336		.out_is_imm = false,
1337		.encode_slave_id = false,
1338		.verify = NULL,
1339		.wrapper = mlx4_QUERY_IF_STAT_wrapper
1340	},
1341	/* Native multicast commands are not available for guests */
1342	{
1343		.opcode = MLX4_CMD_QP_ATTACH,
1344		.has_inbox = true,
1345		.has_outbox = false,
1346		.out_is_imm = false,
1347		.encode_slave_id = false,
1348		.verify = NULL,
1349		.wrapper = mlx4_QP_ATTACH_wrapper
1350	},
1351	{
1352		.opcode = MLX4_CMD_PROMISC,
1353		.has_inbox = false,
1354		.has_outbox = false,
1355		.out_is_imm = false,
1356		.encode_slave_id = false,
1357		.verify = NULL,
1358		.wrapper = mlx4_PROMISC_wrapper
1359	},
1360	/* Ethernet specific commands */
1361	{
1362		.opcode = MLX4_CMD_SET_VLAN_FLTR,
1363		.has_inbox = true,
1364		.has_outbox = false,
1365		.out_is_imm = false,
1366		.encode_slave_id = false,
1367		.verify = NULL,
1368		.wrapper = mlx4_SET_VLAN_FLTR_wrapper
1369	},
1370	{
1371		.opcode = MLX4_CMD_SET_MCAST_FLTR,
1372		.has_inbox = false,
1373		.has_outbox = false,
1374		.out_is_imm = false,
1375		.encode_slave_id = false,
1376		.verify = NULL,
1377		.wrapper = mlx4_SET_MCAST_FLTR_wrapper
1378	},
1379	{
1380		.opcode = MLX4_CMD_DUMP_ETH_STATS,
1381		.has_inbox = false,
1382		.has_outbox = true,
1383		.out_is_imm = false,
1384		.encode_slave_id = false,
1385		.verify = NULL,
1386		.wrapper = mlx4_DUMP_ETH_STATS_wrapper
1387	},
1388	{
1389		.opcode = MLX4_CMD_INFORM_FLR_DONE,
1390		.has_inbox = false,
1391		.has_outbox = false,
1392		.out_is_imm = false,
1393		.encode_slave_id = false,
1394		.verify = NULL,
1395		.wrapper = NULL
1396	},
1397	/* flow steering commands */
1398	{
1399		.opcode = MLX4_QP_FLOW_STEERING_ATTACH,
1400		.has_inbox = true,
1401		.has_outbox = false,
1402		.out_is_imm = true,
1403		.encode_slave_id = false,
1404		.verify = NULL,
1405		.wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper
1406	},
1407	{
1408		.opcode = MLX4_QP_FLOW_STEERING_DETACH,
1409		.has_inbox = false,
1410		.has_outbox = false,
1411		.out_is_imm = false,
1412		.encode_slave_id = false,
1413		.verify = NULL,
1414		.wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
1415	},
1416	{
1417		.opcode = MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
1418		.has_inbox = false,
1419		.has_outbox = false,
1420		.out_is_imm = false,
1421		.encode_slave_id = false,
1422		.verify = NULL,
1423		.wrapper = mlx4_CMD_EPERM_wrapper
1424	},
1425};
1426
/* Execute one command posted by @slave through its virtual HCR (vHCR).
 *
 * If @in_vhcr is NULL (the normal VHCR_POST path) the vHCR image is first
 * DMA'd in from the address the slave registered via the VHCR0..VHCR_EN
 * handshake; otherwise the caller-supplied image is used.  The opcode is
 * looked up in cmd_info[]; in/out mailboxes are staged over DMA as the
 * table entry requires, and the command runs either through its
 * paravirtualization wrapper or natively via __mlx4_cmd().
 *
 * Command-level failures are reported to the slave through
 * vhcr_cmd->status and still return 0 here.  A nonzero return value
 * means the vHCR transport itself failed and the slave is in an
 * undefined state; the caller must reset it.
 */
static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
				    struct mlx4_vhcr_cmd *in_vhcr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_info *cmd = NULL;
	/* Device-endian vHCR image: either the caller's copy or the
	 * master's DMA bounce buffer filled below. */
	struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
	struct mlx4_vhcr *vhcr;
	struct mlx4_cmd_mailbox *inbox = NULL;
	struct mlx4_cmd_mailbox *outbox = NULL;
	u64 in_param;
	u64 out_param;
	int ret = 0;
	int i;
	int err = 0;

	/* Create sw representation of Virtual HCR */
	vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
	if (!vhcr)
		return -ENOMEM;

	/* DMA in the vHCR from the slave's registered address */
	if (!in_vhcr) {
		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
				      priv->mfunc.master.slave_state[slave].vhcr_dma,
				      ALIGN(sizeof(struct mlx4_vhcr_cmd),
					    MLX4_ACCESS_MEM_ALIGN), 1);
		if (ret) {
			mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
				 __func__, ret);
			kfree(vhcr);
			return ret;
		}
	}

	/* Fill SW VHCR fields (device image is big-endian).  The opcode
	 * word packs the op modifier in its top 4 bits; bit 6 of flags
	 * (e_bit) asks for a completion EQE back to the slave. */
	vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
	vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
	vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
	vhcr->token = be16_to_cpu(vhcr_cmd->token);
	vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
	vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
	vhcr->e_bit = vhcr_cmd->flags & (1 << 6);

	/* Lookup command; only opcodes in cmd_info[] may be issued by a
	 * slave -- anything else is rejected. */
	for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
		if (vhcr->op == cmd_info[i].opcode) {
			cmd = &cmd_info[i];
			break;
		}
	}
	if (!cmd) {
		mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
			 vhcr->op, slave);
		vhcr_cmd->status = CMD_STAT_BAD_PARAM;
		goto out_status;
	}

	/* Read inbox: copy the slave's input mailbox into a local one */
	if (cmd->has_inbox) {
		vhcr->in_param &= INBOX_MASK;
		inbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(inbox)) {
			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
			inbox = NULL;
			goto out_status;
		}

		if (mlx4_ACCESS_MEM(dev, inbox->dma, slave,
				    vhcr->in_param,
				    MLX4_MAILBOX_SIZE, 1)) {
			mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
				 __func__, cmd->opcode);
			vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
			goto out_status;
		}
	}

	/* Apply permission and bound checks if applicable */
	if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
		mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
			  vhcr->op, slave, vhcr->in_modifier);
		vhcr_cmd->status = CMD_STAT_BAD_OP;
		goto out_status;
	}

	/* Allocate outbox */
	if (cmd->has_outbox) {
		outbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(outbox)) {
			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
			outbox = NULL;
			goto out_status;
		}
	}

	/* Execute the command!  Wrapped commands go through the table's
	 * wrapper (which validates/translates resources); the rest run
	 * natively on the firmware. */
	if (cmd->wrapper) {
		err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
				   cmd);
		if (cmd->out_is_imm)
			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
	} else {
		in_param = cmd->has_inbox ? (u64) inbox->dma :
			vhcr->in_param;
		out_param = cmd->has_outbox ? (u64) outbox->dma :
			vhcr->out_param;
		err = __mlx4_cmd(dev, in_param, &out_param,
				 cmd->out_is_imm, vhcr->in_modifier,
				 vhcr->op_modifier, vhcr->op,
				 MLX4_CMD_TIME_CLASS_A,
				 MLX4_CMD_NATIVE);

		if (cmd->out_is_imm) {
			vhcr->out_param = out_param;
			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
		}
	}

	if (err) {
		mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
			  vhcr->op, slave, vhcr->errno, err);
		vhcr_cmd->status = mlx4_errno_to_status(err);
		goto out_status;
	}


	/* Write outbox if command completed successfully.
	 * NOTE(review): the last argument of mlx4_ACCESS_MEM is the
	 * read/write direction flag elsewhere (1 == read at the call
	 * sites above); passing MLX4_CMD_WRAPPED here relies on its
	 * numeric value meaning "write" -- confirm against cmd.h. */
	if (cmd->has_outbox && !vhcr_cmd->status) {
		ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
				      vhcr->out_param,
				      MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
		if (ret) {
			/* If we failed to write back the outbox after the
			 * command was successfully executed, we must fail
			 * this slave, as it is now in undefined state. */
			mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
			goto out;
		}
	}

out_status:
	/* DMA back vhcr result and, if the slave asked for one (e_bit),
	 * generate a command-completion EQE towards it. */
	if (!in_vhcr) {
		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
				      priv->mfunc.master.slave_state[slave].vhcr_dma,
				      ALIGN(sizeof(struct mlx4_vhcr),
					    MLX4_ACCESS_MEM_ALIGN),
				      MLX4_CMD_WRAPPED);
		if (ret)
			mlx4_err(dev, "%s:Failed writing vhcr result\n",
				 __func__);
		else if (vhcr->e_bit &&
			 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
				mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
					  slave);
	}

out:
	kfree(vhcr);
	mlx4_free_cmd_mailbox(dev, inbox);
	mlx4_free_cmd_mailbox(dev, outbox);
	return ret;
}
1590
/* Apply a changed admin VLAN/QoS/link-state directive to an already
 * active VF immediately, without waiting for the VF to restart.
 *
 * If anything changed, a work item is queued that walks the slave's QPs
 * with UPDATE_QP (mlx4_vf_immed_vlan_work_handler); a new VLAN is
 * registered here first so the worker only has to plug in the index.
 *
 * Return: 0 when nothing changed or the work was queued, -ENOMEM on
 * allocation failure, a __mlx4_register_vlan() error code, or -1 when
 * the slave is inactive or the device lacks UPDATE_QP support (in that
 * case only the link state is carried over).
 */
static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
					    int slave, int port)
{
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_vport_state *vp_admin;
	struct mlx4_vf_immed_vlan_work *work;
	struct mlx4_dev *dev = &(priv->dev);
	int err;
	int admin_vlan_ix = NO_INDX;

	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];

	/* Nothing to do if operational state already matches admin state */
	if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
	    vp_oper->state.default_qos == vp_admin->default_qos &&
	    vp_oper->state.link_state == vp_admin->link_state)
		return 0;

	if (!(priv->mfunc.master.slave_state[slave].active &&
	      dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) {
		/* even if the UPDATE_QP command isn't supported, we still want
		 * to set this VF link according to the admin directive
		 */
		vp_oper->state.link_state = vp_admin->link_state;
		return -1;
	}

	mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
		 slave, port);
	mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
		 vp_admin->default_vlan, vp_admin->default_qos,
		 vp_admin->link_state);

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
		/* MLX4_VGT means "VLAN guest tagging" -- no port VLAN, so
		 * nothing to register in that case. */
		if (MLX4_VGT != vp_admin->default_vlan) {
			err = __mlx4_register_vlan(&priv->dev, port,
						   vp_admin->default_vlan,
						   &admin_vlan_ix);
			if (err) {
				kfree(work);
				mlx4_warn(&priv->dev,
					  "No vlan resources slave %d, port %d\n",
					  slave, port);
				return err;
			}
		} else {
			admin_vlan_ix = NO_INDX;
		}
		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
		mlx4_dbg(&priv->dev,
			 "alloc vlan %d idx  %d slave %d port %d\n",
			 (int)(vp_admin->default_vlan),
			 admin_vlan_ix, slave, port);
	}

	/* save original vlan ix and vlan id so the worker can release them */
	work->orig_vlan_id = vp_oper->state.default_vlan;
	work->orig_vlan_ix = vp_oper->vlan_idx;

	/* handle new qos */
	if (vp_oper->state.default_qos != vp_admin->default_qos)
		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS;

	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN)
		vp_oper->vlan_idx = admin_vlan_ix;

	/* Commit the admin state as the new operational state */
	vp_oper->state.default_vlan = vp_admin->default_vlan;
	vp_oper->state.default_qos = vp_admin->default_qos;
	vp_oper->state.link_state = vp_admin->link_state;

	if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE)
		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE;

	/* iterate over QPs owned by this slave, using UPDATE_QP */
	work->port = port;
	work->slave = slave;
	work->qos = vp_oper->state.default_qos;
	work->vlan_id = vp_oper->state.default_vlan;
	work->vlan_ix = vp_oper->vlan_idx;
	work->priv = priv;
	INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
	queue_work(priv->mfunc.master.comm_wq, &work->work);

	return 0;
}
1680
1681
/* Promote @slave's admin state to operational state at activation time
 * (VHCR_EN): copy vf_admin into vf_oper for every active port, register
 * the port VLAN when one is configured (i.e. not MLX4_VGT), and register
 * the VF MAC when spoof-checking is enabled.
 *
 * Return: 0 on success, or the first registration error.
 *
 * NOTE(review): on a registration failure for port N, VLANs/MACs already
 * registered for earlier ports are left registered while their slave is
 * reported as failed -- TODO confirm the caller (mlx4_master_do_cmd's
 * reset_slave path) releases them via the deactivate/FLR flow.
 */
static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
{
	int port, err;
	struct mlx4_vport_state *vp_admin;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
			&priv->dev, slave);
	/* Ports are 1-based; [min_port, max_port] spans the slave's
	 * active-port bitmap. */
	int min_port = find_first_bit(actv_ports.ports,
				      priv->dev.caps.num_ports) + 1;
	int max_port = min_port - 1 +
		bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);

	for (port = min_port; port <= max_port; port++) {
		if (!test_bit(port - 1, actv_ports.ports))
			continue;
		priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
			priv->mfunc.master.vf_admin[slave].enable_smi[port];
		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
		vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
		/* struct copy: admin becomes the operational state */
		vp_oper->state = *vp_admin;
		if (MLX4_VGT != vp_admin->default_vlan) {
			err = __mlx4_register_vlan(&priv->dev, port,
						   vp_admin->default_vlan, &(vp_oper->vlan_idx));
			if (err) {
				vp_oper->vlan_idx = NO_INDX;
				mlx4_warn(&priv->dev,
					  "No vlan resources slave %d, port %d\n",
					  slave, port);
				return err;
			}
			mlx4_dbg(&priv->dev, "alloc vlan %d idx  %d slave %d port %d\n",
				 (int)(vp_oper->state.default_vlan),
				 vp_oper->vlan_idx, slave, port);
		}
		if (vp_admin->spoofchk) {
			/* Spoof-check needs the MAC pinned in the port table;
			 * __mlx4_register_mac returns a negative errno or the
			 * table index. */
			vp_oper->mac_idx = __mlx4_register_mac(&priv->dev,
							       port,
							       vp_admin->mac);
			if (0 > vp_oper->mac_idx) {
				err = vp_oper->mac_idx;
				vp_oper->mac_idx = NO_INDX;
				mlx4_warn(&priv->dev,
					  "No mac resources slave %d, port %d\n",
					  slave, port);
				return err;
			}
			mlx4_dbg(&priv->dev, "alloc mac %llx idx  %d slave %d port %d\n",
				 vp_oper->state.mac, vp_oper->mac_idx, slave, port);
		}
	}
	return 0;
}
1734
1735static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
1736{
1737	int port;
1738	struct mlx4_vport_oper_state *vp_oper;
1739	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
1740			&priv->dev, slave);
1741	int min_port = find_first_bit(actv_ports.ports,
1742				      priv->dev.caps.num_ports) + 1;
1743	int max_port = min_port - 1 +
1744		bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
1745
1746
1747	for (port = min_port; port <= max_port; port++) {
1748		if (!test_bit(port - 1, actv_ports.ports))
1749			continue;
1750		priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
1751			MLX4_VF_SMI_DISABLED;
1752		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1753		if (NO_INDX != vp_oper->vlan_idx) {
1754			__mlx4_unregister_vlan(&priv->dev,
1755					       port, vp_oper->state.default_vlan);
1756			vp_oper->vlan_idx = NO_INDX;
1757		}
1758		if (NO_INDX != vp_oper->mac_idx) {
1759			__mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac);
1760			vp_oper->mac_idx = NO_INDX;
1761		}
1762	}
1763	return;
1764}
1765
/* Handle one comm-channel command from @slave.
 *
 * The comm channel is a toggle-based protocol: each new command from the
 * slave flips a toggle bit, and the master acknowledges by writing the
 * same toggle into the slave_read doorbell (bit 31 of @reply).  The
 * commands form a small state machine:
 *
 *   RESET                   - tear down all slave state;
 *   VHCR0 / VHCR1 / VHCR2   - deliver the slave's vHCR DMA address in
 *                             16-bit chunks (bits 63:48, 47:32, 31:16);
 *   VHCR_EN                 - final address chunk + activate the slave;
 *   VHCR_POST               - execute the command currently in the vHCR.
 *
 * Any out-of-order command (tracked via slave_state[].last_cmd) resets
 * the slave.
 */
static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
			       u16 param, u8 toggle)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	u32 reply;
	u8 is_going_down = 0;
	int i;
	unsigned long flags;

	/* Advance our toggle; the slave's toggle must now match it. */
	slave_state[slave].comm_toggle ^= 1;
	reply = (u32) slave_state[slave].comm_toggle << 31;
	if (toggle != slave_state[slave].comm_toggle) {
		mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
			  toggle, slave);
		goto reset_slave;
	}
	if (cmd == MLX4_COMM_CMD_RESET) {
		mlx4_warn(dev, "Received reset from slave:%d\n", slave);
		slave_state[slave].active = false;
		slave_state[slave].old_vlan_api = false;
		mlx4_master_deactivate_admin_state(priv, slave);
		/* Invalidate any EQs the slave had mapped for events */
		for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
				slave_state[slave].event_eq[i].eqn = -1;
				slave_state[slave].event_eq[i].token = 0;
		}
		/*check if we are in the middle of FLR process,
		if so return "retry" status to the slave*/
		if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
			goto inform_slave_state;

		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave);

		/* write the version in the event field */
		reply |= mlx4_comm_get_version();

		goto reset_slave;
	}
	/*command from slave in the middle of FLR*/
	if (cmd != MLX4_COMM_CMD_RESET &&
	    MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
		mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) in the middle of FLR\n",
			  slave, cmd);
		return;
	}

	switch (cmd) {
	case MLX4_COMM_CMD_VHCR0:
		/* First address chunk: bits 63:48 of the vHCR DMA address */
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
			goto reset_slave;
		slave_state[slave].vhcr_dma = ((u64) param) << 48;
		priv->mfunc.master.slave_state[slave].cookie = 0;
		mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]);
		break;
	case MLX4_COMM_CMD_VHCR1:
		/* Bits 47:32 */
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
			goto reset_slave;
		slave_state[slave].vhcr_dma |= ((u64) param) << 32;
		break;
	case MLX4_COMM_CMD_VHCR2:
		/* Bits 31:16 */
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
			goto reset_slave;
		slave_state[slave].vhcr_dma |= ((u64) param) << 16;
		break;
	case MLX4_COMM_CMD_VHCR_EN:
		/* Final chunk (bits 15:0); slave becomes active */
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
			goto reset_slave;
		slave_state[slave].vhcr_dma |= param;
		if (mlx4_master_activate_admin_state(priv, slave))
				goto reset_slave;
		slave_state[slave].active = true;
		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
		break;
	case MLX4_COMM_CMD_VHCR_POST:
		if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
		    (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST))
			goto reset_slave;

		/* Serialize vHCR processing across slaves */
		mutex_lock(&priv->cmd.slave_cmd_mutex);
		if (mlx4_master_process_vhcr(dev, slave, NULL)) {
			mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
				 slave);
			mutex_unlock(&priv->cmd.slave_cmd_mutex);
			goto reset_slave;
		}
		mutex_unlock(&priv->cmd.slave_cmd_mutex);
		break;
	default:
		mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
		goto reset_slave;
	}
	/* Record the command unless the slave started going down meanwhile */
	spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
	if (!slave_state[slave].is_slave_going_down)
		slave_state[slave].last_cmd = cmd;
	else
		is_going_down = 1;
	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
	if (is_going_down) {
		mlx4_warn(dev, "Slave is going down aborting command(%d) executing from slave:%d\n",
			  cmd, slave);
		return;
	}
	/* Acknowledge: publish our toggle in the slave_read doorbell */
	__raw_writel((__force u32) cpu_to_be32(reply),
		     &priv->mfunc.comm[slave].slave_read);
	mmiowb();

	return;

reset_slave:
	/* cleanup any slave resources */
	mlx4_delete_all_resources_for_slave(dev, slave);
	spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
	if (!slave_state[slave].is_slave_going_down)
		slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
	/*with slave in the middle of flr, no need to clean resources again.*/
inform_slave_state:
	memset(&slave_state[slave].event_eq, 0,
	       sizeof(struct mlx4_slave_event_eq_info));
	__raw_writel((__force u32) cpu_to_be32(reply),
		     &priv->mfunc.comm[slave].slave_read);
	wmb();
}
1889
/* Master-side comm-channel worker: scan the arm bit vector for slaves
 * with pending commands and dispatch each to mlx4_master_do_cmd().
 *
 * A slave has a pending command when the toggle bit (bit 31) of its
 * slave_write doorbell differs from that of slave_read.  If our cached
 * per-slave toggle disagrees with the hardware slave_read value (e.g.
 * after a surprise slave restart) we resynchronize from the hardware
 * before processing.  Finally the channel EQ is re-armed.
 */
void mlx4_master_comm_channel(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work,
			     struct mlx4_mfunc_master_ctx,
			     comm_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv =
		container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	__be32 *bit_vec;
	u32 comm_cmd;
	u32 vec;
	int i, j, slave;
	int toggle;
	int served = 0;
	int reported = 0;
	u32 slt;

	bit_vec = master->comm_arm_bit_vector;
	for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
		vec = be32_to_cpu(bit_vec[i]);
		for (j = 0; j < 32; j++) {
			/* Each set bit marks a slave that triggered the EQ */
			if (!(vec & (1 << j)))
				continue;
			++reported;
			slave = (i * 32) + j;
			comm_cmd = swab32(readl(
					  &mfunc->comm[slave].slave_write));
			slt = swab32(readl(&mfunc->comm[slave].slave_read))
				     >> 31;
			toggle = comm_cmd >> 31;
			/* Toggle mismatch between write and read doorbells
			 * means a command is pending for this slave. */
			if (toggle != slt) {
				if (master->slave_state[slave].comm_toggle
				    != slt) {
					pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
						slave, slt,
						master->slave_state[slave].comm_toggle);
					master->slave_state[slave].comm_toggle =
						slt;
				}
				/* slave_write layout: cmd in bits 23:16,
				 * param in bits 15:0 */
				mlx4_master_do_cmd(dev, slave,
						   comm_cmd >> 16 & 0xff,
						   comm_cmd & 0xffff, toggle);
				++served;
			}
		}
	}

	if (reported && reported != served)
		mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
			  reported, served);

	if (mlx4_ARM_COMM_CHANNEL(dev))
		mlx4_warn(dev, "Failed to arm comm channel events\n");
}
1948
1949static int sync_toggles(struct mlx4_dev *dev)
1950{
1951	struct mlx4_priv *priv = mlx4_priv(dev);
1952	int wr_toggle;
1953	int rd_toggle;
1954	unsigned long end;
1955
1956	wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)) >> 31;
1957	end = jiffies + msecs_to_jiffies(5000);
1958
1959	while (time_before(jiffies, end)) {
1960		rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)) >> 31;
1961		if (rd_toggle == wr_toggle) {
1962			priv->cmd.comm_toggle = rd_toggle;
1963			return 0;
1964		}
1965
1966		cond_resched();
1967	}
1968
1969	/*
1970	 * we could reach here if for example the previous VM using this
1971	 * function misbehaved and left the channel with unsynced state. We
1972	 * should fix this here and give this VM a chance to use a properly
1973	 * synced channel
1974	 */
1975	mlx4_warn(dev, "recovering from previously mis-behaved VM\n");
1976	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
1977	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
1978	priv->cmd.comm_toggle = 0;
1979
1980	return 0;
1981}
1982
1983int mlx4_multi_func_init(struct mlx4_dev *dev)
1984{
1985	struct mlx4_priv *priv = mlx4_priv(dev);
1986	struct mlx4_slave_state *s_state;
1987	int i, j, err, port;
1988
1989	if (mlx4_is_master(dev))
1990		priv->mfunc.comm =
1991		ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) +
1992			priv->fw.comm_base, MLX4_COMM_PAGESIZE);
1993	else
1994		priv->mfunc.comm =
1995		ioremap(pci_resource_start(dev->pdev, 2) +
1996			MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
1997	if (!priv->mfunc.comm) {
1998		mlx4_err(dev, "Couldn't map communication vector\n");
1999		goto err_vhcr;
2000	}
2001
2002	if (mlx4_is_master(dev)) {
2003		priv->mfunc.master.slave_state =
2004			kzalloc(dev->num_slaves *
2005				sizeof(struct mlx4_slave_state), GFP_KERNEL);
2006		if (!priv->mfunc.master.slave_state)
2007			goto err_comm;
2008
2009		priv->mfunc.master.vf_admin =
2010			kzalloc(dev->num_slaves *
2011				sizeof(struct mlx4_vf_admin_state), GFP_KERNEL);
2012		if (!priv->mfunc.master.vf_admin)
2013			goto err_comm_admin;
2014
2015		priv->mfunc.master.vf_oper =
2016			kzalloc(dev->num_slaves *
2017				sizeof(struct mlx4_vf_oper_state), GFP_KERNEL);
2018		if (!priv->mfunc.master.vf_oper)
2019			goto err_comm_oper;
2020
2021		for (i = 0; i < dev->num_slaves; ++i) {
2022			s_state = &priv->mfunc.master.slave_state[i];
2023			s_state->last_cmd = MLX4_COMM_CMD_RESET;
2024			for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
2025				s_state->event_eq[j].eqn = -1;
2026			__raw_writel((__force u32) 0,
2027				     &priv->mfunc.comm[i].slave_write);
2028			__raw_writel((__force u32) 0,
2029				     &priv->mfunc.comm[i].slave_read);
2030			mmiowb();
2031			for (port = 1; port <= MLX4_MAX_PORTS; port++) {
2032				s_state->vlan_filter[port] =
2033					kzalloc(sizeof(struct mlx4_vlan_fltr),
2034						GFP_KERNEL);
2035				if (!s_state->vlan_filter[port]) {
2036					if (--port)
2037						kfree(s_state->vlan_filter[port]);
2038					goto err_slaves;
2039				}
2040				INIT_LIST_HEAD(&s_state->mcast_filters[port]);
2041				priv->mfunc.master.vf_admin[i].vport[port].default_vlan = MLX4_VGT;
2042				priv->mfunc.master.vf_oper[i].vport[port].state.default_vlan = MLX4_VGT;
2043				priv->mfunc.master.vf_oper[i].vport[port].vlan_idx = NO_INDX;
2044				priv->mfunc.master.vf_oper[i].vport[port].mac_idx = NO_INDX;
2045			}
2046			spin_lock_init(&s_state->lock);
2047		}
2048
2049		memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
2050		priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
2051		INIT_WORK(&priv->mfunc.master.comm_work,
2052			  mlx4_master_comm_channel);
2053		INIT_WORK(&priv->mfunc.master.slave_event_work,
2054			  mlx4_gen_slave_eqe);
2055		INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
2056			  mlx4_master_handle_slave_flr);
2057		spin_lock_init(&priv->mfunc.master.slave_state_lock);
2058		spin_lock_init(&priv->mfunc.master.slave_eq.event_lock);
2059		priv->mfunc.master.comm_wq =
2060			create_singlethread_workqueue("mlx4_comm");
2061		if (!priv->mfunc.master.comm_wq)
2062			goto err_slaves;
2063
2064		if (mlx4_init_resource_tracker(dev))
2065			goto err_thread;
2066
2067		err = mlx4_ARM_COMM_CHANNEL(dev);
2068		if (err) {
2069			mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
2070				 err);
2071			goto err_resource;
2072		}
2073
2074	} else {
2075		err = sync_toggles(dev);
2076		if (err) {
2077			mlx4_err(dev, "Couldn't sync toggles\n");
2078			goto err_comm;
2079		}
2080	}
2081	return 0;
2082
2083err_resource:
2084	mlx4_free_resource_tracker(dev, RES_TR_FREE_ALL);
2085err_thread:
2086	flush_workqueue(priv->mfunc.master.comm_wq);
2087	destroy_workqueue(priv->mfunc.master.comm_wq);
2088err_slaves:
2089	while (--i) {
2090		for (port = 1; port <= MLX4_MAX_PORTS; port++)
2091			kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2092	}
2093	kfree(priv->mfunc.master.vf_oper);
2094err_comm_oper:
2095	kfree(priv->mfunc.master.vf_admin);
2096err_comm_admin:
2097	kfree(priv->mfunc.master.slave_state);
2098err_comm:
2099	iounmap(priv->mfunc.comm);
2100err_vhcr:
2101	dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
2102					     priv->mfunc.vhcr,
2103					     priv->mfunc.vhcr_dma);
2104	priv->mfunc.vhcr = NULL;
2105	return -ENOMEM;
2106}
2107
/* One-time setup of the FW command interface: maps the HCR (native and
 * master functions only), allocates the VHCR DMA page (multi-function
 * only) and creates the mailbox DMA pool.  Returns 0 or -ENOMEM; any
 * partially-acquired resources are released on failure.
 */
int mlx4_cmd_init(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_init(&priv->cmd.hcr_mutex);
	mutex_init(&priv->cmd.slave_cmd_mutex);
	sema_init(&priv->cmd.poll_sem, 1);
	priv->cmd.use_events = 0;	/* start in polling mode */
	priv->cmd.toggle     = 1;

	priv->cmd.hcr = NULL;
	priv->mfunc.vhcr = NULL;

	if (!mlx4_is_slave(dev)) {
		/* Slaves issue commands through the comm channel, so
		 * only non-slave functions map the HW command register.
		 */
		priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
					MLX4_HCR_BASE, MLX4_HCR_SIZE);
		if (!priv->cmd.hcr) {
			mlx4_err(dev, "Couldn't map command register\n");
			return -ENOMEM;
		}
	}

	if (mlx4_is_mfunc(dev)) {
		/* One DMA-coherent page shared with FW for the virtual
		 * HCR used by the para-virtualized command flow.
		 */
		priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
						      &priv->mfunc.vhcr_dma,
						      GFP_KERNEL);
		if (!priv->mfunc.vhcr)
			goto err_hcr;
	}

	priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
					 MLX4_MAILBOX_SIZE,
					 MLX4_MAILBOX_SIZE, 0);
	if (!priv->cmd.pool)
		goto err_vhcr;

	return 0;

	/* Unwind in reverse acquisition order. */
err_vhcr:
	if (mlx4_is_mfunc(dev))
		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
				  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
	priv->mfunc.vhcr = NULL;

err_hcr:
	if (!mlx4_is_slave(dev))
		iounmap(priv->cmd.hcr);
	return -ENOMEM;
}
2157
2158void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
2159{
2160	struct mlx4_priv *priv = mlx4_priv(dev);
2161	int i, port;
2162
2163	if (mlx4_is_master(dev)) {
2164		flush_workqueue(priv->mfunc.master.comm_wq);
2165		destroy_workqueue(priv->mfunc.master.comm_wq);
2166		for (i = 0; i < dev->num_slaves; i++) {
2167			for (port = 1; port <= MLX4_MAX_PORTS; port++)
2168				kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2169		}
2170		kfree(priv->mfunc.master.slave_state);
2171		kfree(priv->mfunc.master.vf_admin);
2172		kfree(priv->mfunc.master.vf_oper);
2173	}
2174
2175	iounmap(priv->mfunc.comm);
2176}
2177
/* Undo mlx4_cmd_init(): destroy the mailbox pool, unmap the HCR
 * (non-slave functions only) and free the VHCR page (multi-function
 * only), mirroring the conditions under which each was acquired.
 */
void mlx4_cmd_cleanup(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	pci_pool_destroy(priv->cmd.pool);

	if (!mlx4_is_slave(dev))
		iounmap(priv->cmd.hcr);
	if (mlx4_is_mfunc(dev))
		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
				  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
	priv->mfunc.vhcr = NULL;	/* guard against stale use */
}
2191
2192/*
2193 * Switch to using events to issue FW commands (can only be called
2194 * after event queue for command events has been initialized).
2195 */
2196int mlx4_cmd_use_events(struct mlx4_dev *dev)
2197{
2198	struct mlx4_priv *priv = mlx4_priv(dev);
2199	int i;
2200	int err = 0;
2201
2202	priv->cmd.context = kmalloc(priv->cmd.max_cmds *
2203				   sizeof (struct mlx4_cmd_context),
2204				   GFP_KERNEL);
2205	if (!priv->cmd.context)
2206		return -ENOMEM;
2207
2208	for (i = 0; i < priv->cmd.max_cmds; ++i) {
2209		priv->cmd.context[i].token = i;
2210		priv->cmd.context[i].next  = i + 1;
2211	}
2212
2213	priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
2214	priv->cmd.free_head = 0;
2215
2216	sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
2217	spin_lock_init(&priv->cmd.context_lock);
2218
2219	for (priv->cmd.token_mask = 1;
2220	     priv->cmd.token_mask < priv->cmd.max_cmds;
2221	     priv->cmd.token_mask <<= 1)
2222		; /* nothing */
2223	--priv->cmd.token_mask;
2224
2225	down(&priv->cmd.poll_sem);
2226	priv->cmd.use_events = 1;
2227
2228	return err;
2229}
2230
2231/*
2232 * Switch back to polling (used when shutting down the device)
2233 */
2234void mlx4_cmd_use_polling(struct mlx4_dev *dev)
2235{
2236	struct mlx4_priv *priv = mlx4_priv(dev);
2237	int i;
2238
2239	priv->cmd.use_events = 0;
2240
2241	for (i = 0; i < priv->cmd.max_cmds; ++i)
2242		down(&priv->cmd.event_sem);
2243
2244	kfree(priv->cmd.context);
2245
2246	up(&priv->cmd.poll_sem);
2247}
2248
2249struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
2250{
2251	struct mlx4_cmd_mailbox *mailbox;
2252
2253	mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
2254	if (!mailbox)
2255		return ERR_PTR(-ENOMEM);
2256
2257	mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
2258				      &mailbox->dma);
2259	if (!mailbox->buf) {
2260		kfree(mailbox);
2261		return ERR_PTR(-ENOMEM);
2262	}
2263
2264	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
2265
2266	return mailbox;
2267}
2268EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
2269
2270void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
2271			   struct mlx4_cmd_mailbox *mailbox)
2272{
2273	if (!mailbox)
2274		return;
2275
2276	pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
2277	kfree(mailbox);
2278}
2279EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
2280
2281u32 mlx4_comm_get_version(void)
2282{
2283	 return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
2284}
2285
2286static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
2287{
2288	if ((vf < 0) || (vf >= dev->num_vfs)) {
2289		mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n", vf, dev->num_vfs);
2290		return -EINVAL;
2291	}
2292
2293	return vf+1;
2294}
2295
2296int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
2297{
2298	if (slave < 1 || slave > dev->num_vfs) {
2299		mlx4_err(dev,
2300			 "Bad slave number:%d (number of activated slaves: %lu)\n",
2301			 slave, dev->num_slaves);
2302		return -EINVAL;
2303	}
2304	return slave - 1;
2305}
2306
2307struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
2308{
2309	struct mlx4_active_ports actv_ports;
2310	int vf;
2311
2312	bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS);
2313
2314	if (slave == 0) {
2315		bitmap_fill(actv_ports.ports, dev->caps.num_ports);
2316		return actv_ports;
2317	}
2318
2319	vf = mlx4_get_vf_indx(dev, slave);
2320	if (vf < 0)
2321		return actv_ports;
2322
2323	bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1,
2324		   min((int)dev->dev_vfs[mlx4_get_vf_indx(dev, slave)].n_ports,
2325		   dev->caps.num_ports));
2326
2327	return actv_ports;
2328}
2329EXPORT_SYMBOL_GPL(mlx4_get_active_ports);
2330
2331int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port)
2332{
2333	unsigned n;
2334	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2335	unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2336
2337	if (port <= 0 || port > m)
2338		return -EINVAL;
2339
2340	n = find_first_bit(actv_ports.ports, dev->caps.num_ports);
2341	if (port <= n)
2342		port = n + 1;
2343
2344	return port;
2345}
2346EXPORT_SYMBOL_GPL(mlx4_slave_convert_port);
2347
2348int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port)
2349{
2350	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2351	if (test_bit(port - 1, actv_ports.ports))
2352		return port -
2353			find_first_bit(actv_ports.ports, dev->caps.num_ports);
2354
2355	return -1;
2356}
2357EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port);
2358
2359struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
2360						   int port)
2361{
2362	unsigned i;
2363	struct mlx4_slaves_pport slaves_pport;
2364
2365	bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2366
2367	if (port <= 0 || port > dev->caps.num_ports)
2368		return slaves_pport;
2369
2370	for (i = 0; i < dev->num_vfs + 1; i++) {
2371		struct mlx4_active_ports actv_ports =
2372			mlx4_get_active_ports(dev, i);
2373		if (test_bit(port - 1, actv_ports.ports))
2374			set_bit(i, slaves_pport.slaves);
2375	}
2376
2377	return slaves_pport;
2378}
2379EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport);
2380
2381struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
2382		struct mlx4_dev *dev,
2383		const struct mlx4_active_ports *crit_ports)
2384{
2385	unsigned i;
2386	struct mlx4_slaves_pport slaves_pport;
2387
2388	bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2389
2390	for (i = 0; i < dev->num_vfs + 1; i++) {
2391		struct mlx4_active_ports actv_ports =
2392			mlx4_get_active_ports(dev, i);
2393		if (bitmap_equal(crit_ports->ports, actv_ports.ports,
2394				 dev->caps.num_ports))
2395			set_bit(i, slaves_pport.slaves);
2396	}
2397
2398	return slaves_pport;
2399}
2400EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
2401
2402static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
2403{
2404	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2405	int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
2406			+ 1;
2407	int max_port = min_port +
2408		bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2409
2410	if (port < min_port)
2411		port = min_port;
2412	else if (port >= max_port)
2413		port = max_port - 1;
2414
2415	return port;
2416}
2417
2418int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
2419{
2420	struct mlx4_priv *priv = mlx4_priv(dev);
2421	struct mlx4_vport_state *s_info;
2422	int slave;
2423
2424	if (!mlx4_is_master(dev))
2425		return -EPROTONOSUPPORT;
2426
2427	slave = mlx4_get_slave_indx(dev, vf);
2428	if (slave < 0)
2429		return -EINVAL;
2430
2431	port = mlx4_slaves_closest_port(dev, slave, port);
2432	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2433	s_info->mac = mac;
2434	mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n",
2435		  vf, port, s_info->mac);
2436	return 0;
2437}
2438EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
2439
2440
/* Set the admin VST vlan/qos for a VF port.  vlan == 0 && qos == 0
 * selects VGT (guest-controlled tagging); anything else pins the port
 * to the given VLAN.  If the change cannot be activated immediately it
 * takes effect on the next VF restart.
 */
int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *vf_admin;
	int slave;

	/* Master-only, and requires HW VLAN-control support. */
	if ((!mlx4_is_master(dev)) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
		return -EPROTONOSUPPORT;

	/* 12-bit VLAN id, 3-bit priority. */
	if ((vlan > 4095) || (qos > 7))
		return -EINVAL;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];

	if ((0 == vlan) && (0 == qos))
		vf_admin->default_vlan = MLX4_VGT;
	else
		vf_admin->default_vlan = vlan;
	vf_admin->default_qos = qos;

	if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
		mlx4_info(dev,
			  "updating vf %d port %d config will take effect on next VF restart\n",
			  vf, port);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
2474
2475 /* mlx4_get_slave_default_vlan -
2476 * return true if VST ( default vlan)
2477 * if VST, will return vlan & qos (if not NULL)
2478 */
2479bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
2480				 u16 *vlan, u8 *qos)
2481{
2482	struct mlx4_vport_oper_state *vp_oper;
2483	struct mlx4_priv *priv;
2484
2485	priv = mlx4_priv(dev);
2486	port = mlx4_slaves_closest_port(dev, slave, port);
2487	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2488
2489	if (MLX4_VGT != vp_oper->state.default_vlan) {
2490		if (vlan)
2491			*vlan = vp_oper->state.default_vlan;
2492		if (qos)
2493			*qos = vp_oper->state.default_qos;
2494		return true;
2495	}
2496	return false;
2497}
2498EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);
2499
2500int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
2501{
2502	struct mlx4_priv *priv = mlx4_priv(dev);
2503	struct mlx4_vport_state *s_info;
2504	int slave;
2505
2506	if ((!mlx4_is_master(dev)) ||
2507	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
2508		return -EPROTONOSUPPORT;
2509
2510	slave = mlx4_get_slave_indx(dev, vf);
2511	if (slave < 0)
2512		return -EINVAL;
2513
2514	port = mlx4_slaves_closest_port(dev, slave, port);
2515	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2516	s_info->spoofchk = setting;
2517
2518	return 0;
2519}
2520EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);
2521
2522int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf)
2523{
2524	struct mlx4_priv *priv = mlx4_priv(dev);
2525	struct mlx4_vport_state *s_info;
2526	int slave;
2527
2528	if (!mlx4_is_master(dev))
2529		return -EPROTONOSUPPORT;
2530
2531	slave = mlx4_get_slave_indx(dev, vf);
2532	if (slave < 0)
2533		return -EINVAL;
2534
2535	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2536	ivf->vf = vf;
2537
2538	/* need to convert it to a func */
2539	ivf->mac[0] = ((s_info->mac >> (5*8)) & 0xff);
2540	ivf->mac[1] = ((s_info->mac >> (4*8)) & 0xff);
2541	ivf->mac[2] = ((s_info->mac >> (3*8)) & 0xff);
2542	ivf->mac[3] = ((s_info->mac >> (2*8)) & 0xff);
2543	ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff);
2544	ivf->mac[5] = ((s_info->mac)  & 0xff);
2545
2546	ivf->vlan		= s_info->default_vlan;
2547	ivf->qos		= s_info->default_qos;
2548	ivf->max_tx_rate	= s_info->tx_rate;
2549	ivf->min_tx_rate	= 0;
2550	ivf->spoofchk		= s_info->spoofchk;
2551	ivf->linkstate		= s_info->link_state;
2552
2553	return 0;
2554}
2555EXPORT_SYMBOL_GPL(mlx4_get_vf_config);
2556
2557int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state)
2558{
2559	struct mlx4_priv *priv = mlx4_priv(dev);
2560	struct mlx4_vport_state *s_info;
2561	int slave;
2562	u8 link_stat_event;
2563
2564	slave = mlx4_get_slave_indx(dev, vf);
2565	if (slave < 0)
2566		return -EINVAL;
2567
2568	port = mlx4_slaves_closest_port(dev, slave, port);
2569	switch (link_state) {
2570	case IFLA_VF_LINK_STATE_AUTO:
2571		/* get current link state */
2572		if (!priv->sense.do_sense_port[port])
2573			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
2574		else
2575			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
2576	    break;
2577
2578	case IFLA_VF_LINK_STATE_ENABLE:
2579		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
2580	    break;
2581
2582	case IFLA_VF_LINK_STATE_DISABLE:
2583		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
2584	    break;
2585
2586	default:
2587		mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n",
2588			  link_state, slave, port);
2589		return -EINVAL;
2590	};
2591	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2592	s_info->link_state = link_state;
2593
2594	/* send event */
2595	mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event);
2596
2597	if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
2598		mlx4_dbg(dev,
2599			 "updating vf %d port %d no link state HW enforcment\n",
2600			 vf, port);
2601	return 0;
2602}
2603EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);
2604
2605int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port)
2606{
2607	struct mlx4_priv *priv = mlx4_priv(dev);
2608
2609	if (slave < 1 || slave >= dev->num_slaves ||
2610	    port < 1 || port > MLX4_MAX_PORTS)
2611		return 0;
2612
2613	return priv->mfunc.master.vf_oper[slave].smi_enabled[port] ==
2614		MLX4_VF_SMI_ENABLED;
2615}
2616EXPORT_SYMBOL_GPL(mlx4_vf_smi_enabled);
2617
2618int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port)
2619{
2620	struct mlx4_priv *priv = mlx4_priv(dev);
2621
2622	if (slave == mlx4_master_func_num(dev))
2623		return 1;
2624
2625	if (slave < 1 || slave >= dev->num_slaves ||
2626	    port < 1 || port > MLX4_MAX_PORTS)
2627		return 0;
2628
2629	return priv->mfunc.master.vf_admin[slave].enable_smi[port] ==
2630		MLX4_VF_SMI_ENABLED;
2631}
2632EXPORT_SYMBOL_GPL(mlx4_vf_get_enable_smi_admin);
2633
2634int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
2635				 int enabled)
2636{
2637	struct mlx4_priv *priv = mlx4_priv(dev);
2638
2639	if (slave == mlx4_master_func_num(dev))
2640		return 0;
2641
2642	if (slave < 1 || slave >= dev->num_slaves ||
2643	    port < 1 || port > MLX4_MAX_PORTS ||
2644	    enabled < 0 || enabled > 1)
2645		return -EINVAL;
2646
2647	priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled;
2648	return 0;
2649}
2650EXPORT_SYMBOL_GPL(mlx4_vf_set_enable_smi_admin);
2651