1/*
2 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
4 * All rights reserved.
5 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses.  You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 *     Redistribution and use in source and binary forms, with or
14 *     without modification, are permitted provided that the following
15 *     conditions are met:
16 *
17 *      - Redistributions of source code must retain the above
18 *        copyright notice, this list of conditions and the following
19 *        disclaimer.
20 *
21 *      - Redistributions in binary form must reproduce the above
22 *        copyright notice, this list of conditions and the following
23 *        disclaimer in the documentation and/or other materials
24 *        provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36#include <linux/sched.h>
37#include <linux/pci.h>
38#include <linux/errno.h>
39#include <linux/kernel.h>
40#include <linux/io.h>
41#include <linux/slab.h>
42#include <linux/mlx4/cmd.h>
43#include <linux/mlx4/qp.h>
44#include <linux/if_ether.h>
45#include <linux/etherdevice.h>
46
47#include "mlx4.h"
48#include "fw.h"
49
50#define MLX4_MAC_VALID		(1ull << 63)
51
52struct mac_res {
53	struct list_head list;
54	u64 mac;
55	int ref_count;
56	u8 smac_index;
57	u8 port;
58};
59
60struct vlan_res {
61	struct list_head list;
62	u16 vlan;
63	int ref_count;
64	int vlan_index;
65	u8 port;
66};
67
68struct res_common {
69	struct list_head	list;
70	struct rb_node		node;
71	u64		        res_id;
72	int			owner;
73	int			state;
74	int			from_state;
75	int			to_state;
76	int			removing;
77};
78
79enum {
80	RES_ANY_BUSY = 1
81};
82
83struct res_gid {
84	struct list_head	list;
85	u8			gid[16];
86	enum mlx4_protocol	prot;
87	enum mlx4_steer_type	steer;
88	u64			reg_id;
89};
90
91enum res_qp_states {
92	RES_QP_BUSY = RES_ANY_BUSY,
93
94	/* QP number was allocated */
95	RES_QP_RESERVED,
96
97	/* ICM memory for QP context was mapped */
98	RES_QP_MAPPED,
99
100	/* QP is in hw ownership */
101	RES_QP_HW
102};
103
104struct res_qp {
105	struct res_common	com;
106	struct res_mtt	       *mtt;
107	struct res_cq	       *rcq;
108	struct res_cq	       *scq;
109	struct res_srq	       *srq;
110	struct list_head	mcg_list;
111	spinlock_t		mcg_spl;
112	int			local_qpn;
113	atomic_t		ref_count;
114	u32			qpc_flags;
115	/* saved qp params before VST enforcement in order to restore on VGT */
116	u8			sched_queue;
117	__be32			param3;
118	u8			vlan_control;
119	u8			fvl_rx;
120	u8			pri_path_fl;
121	u8			vlan_index;
122	u8			feup;
123};
124
125enum res_mtt_states {
126	RES_MTT_BUSY = RES_ANY_BUSY,
127	RES_MTT_ALLOCATED,
128};
129
130static inline const char *mtt_states_str(enum res_mtt_states state)
131{
132	switch (state) {
133	case RES_MTT_BUSY: return "RES_MTT_BUSY";
134	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
135	default: return "Unknown";
136	}
137}
138
139struct res_mtt {
140	struct res_common	com;
141	int			order;
142	atomic_t		ref_count;
143};
144
145enum res_mpt_states {
146	RES_MPT_BUSY = RES_ANY_BUSY,
147	RES_MPT_RESERVED,
148	RES_MPT_MAPPED,
149	RES_MPT_HW,
150};
151
152struct res_mpt {
153	struct res_common	com;
154	struct res_mtt	       *mtt;
155	int			key;
156};
157
158enum res_eq_states {
159	RES_EQ_BUSY = RES_ANY_BUSY,
160	RES_EQ_RESERVED,
161	RES_EQ_HW,
162};
163
164struct res_eq {
165	struct res_common	com;
166	struct res_mtt	       *mtt;
167};
168
169enum res_cq_states {
170	RES_CQ_BUSY = RES_ANY_BUSY,
171	RES_CQ_ALLOCATED,
172	RES_CQ_HW,
173};
174
175struct res_cq {
176	struct res_common	com;
177	struct res_mtt	       *mtt;
178	atomic_t		ref_count;
179};
180
181enum res_srq_states {
182	RES_SRQ_BUSY = RES_ANY_BUSY,
183	RES_SRQ_ALLOCATED,
184	RES_SRQ_HW,
185};
186
187struct res_srq {
188	struct res_common	com;
189	struct res_mtt	       *mtt;
190	struct res_cq	       *cq;
191	atomic_t		ref_count;
192};
193
194enum res_counter_states {
195	RES_COUNTER_BUSY = RES_ANY_BUSY,
196	RES_COUNTER_ALLOCATED,
197};
198
199struct res_counter {
200	struct res_common	com;
201	int			port;
202};
203
204enum res_xrcdn_states {
205	RES_XRCD_BUSY = RES_ANY_BUSY,
206	RES_XRCD_ALLOCATED,
207};
208
209struct res_xrcdn {
210	struct res_common	com;
211	int			port;
212};
213
214enum res_fs_rule_states {
215	RES_FS_RULE_BUSY = RES_ANY_BUSY,
216	RES_FS_RULE_ALLOCATED,
217};
218
219struct res_fs_rule {
220	struct res_common	com;
221	int			qpn;
222};
223
224static int mlx4_is_eth(struct mlx4_dev *dev, int port)
225{
226	return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
227}
228
229static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
230{
231	struct rb_node *node = root->rb_node;
232
233	while (node) {
234		struct res_common *res = container_of(node, struct res_common,
235						      node);
236
237		if (res_id < res->res_id)
238			node = node->rb_left;
239		else if (res_id > res->res_id)
240			node = node->rb_right;
241		else
242			return res;
243	}
244	return NULL;
245}
246
247static int res_tracker_insert(struct rb_root *root, struct res_common *res)
248{
249	struct rb_node **new = &(root->rb_node), *parent = NULL;
250
251	/* Figure out where to put new node */
252	while (*new) {
253		struct res_common *this = container_of(*new, struct res_common,
254						       node);
255
256		parent = *new;
257		if (res->res_id < this->res_id)
258			new = &((*new)->rb_left);
259		else if (res->res_id > this->res_id)
260			new = &((*new)->rb_right);
261		else
262			return -EEXIST;
263	}
264
265	/* Add new node and rebalance tree. */
266	rb_link_node(&res->node, parent, new);
267	rb_insert_color(&res->node, root);
268
269	return 0;
270}
271
272enum qp_transition {
273	QP_TRANS_INIT2RTR,
274	QP_TRANS_RTR2RTS,
275	QP_TRANS_RTS2RTS,
276	QP_TRANS_SQERR2RTS,
277	QP_TRANS_SQD2SQD,
278	QP_TRANS_SQD2RTS
279};
280
281/* For Debug uses */
282static const char *resource_str(enum mlx4_resource rt)
283{
284	switch (rt) {
285	case RES_QP: return "RES_QP";
286	case RES_CQ: return "RES_CQ";
287	case RES_SRQ: return "RES_SRQ";
288	case RES_MPT: return "RES_MPT";
289	case RES_MTT: return "RES_MTT";
290	case RES_MAC: return  "RES_MAC";
291	case RES_VLAN: return  "RES_VLAN";
292	case RES_EQ: return "RES_EQ";
293	case RES_COUNTER: return "RES_COUNTER";
294	case RES_FS_RULE: return "RES_FS_RULE";
295	case RES_XRCD: return "RES_XRCD";
296	default: return "Unknown resource type !!!";
297	};
298}
299
300static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
301static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
302				      enum mlx4_resource res_type, int count,
303				      int port)
304{
305	struct mlx4_priv *priv = mlx4_priv(dev);
306	struct resource_allocator *res_alloc =
307		&priv->mfunc.master.res_tracker.res_alloc[res_type];
308	int err = -EINVAL;
309	int allocated, free, reserved, guaranteed, from_free;
310	int from_rsvd;
311
312	if (slave > dev->num_vfs)
313		return -EINVAL;
314
315	spin_lock(&res_alloc->alloc_lock);
316	allocated = (port > 0) ?
317		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
318		res_alloc->allocated[slave];
319	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
320		res_alloc->res_free;
321	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
322		res_alloc->res_reserved;
323	guaranteed = res_alloc->guaranteed[slave];
324
325	if (allocated + count > res_alloc->quota[slave]) {
326		mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
327			  slave, port, resource_str(res_type), count,
328			  allocated, res_alloc->quota[slave]);
329		goto out;
330	}
331
332	if (allocated + count <= guaranteed) {
333		err = 0;
334		from_rsvd = count;
335	} else {
336		/* portion may need to be obtained from free area */
337		if (guaranteed - allocated > 0)
338			from_free = count - (guaranteed - allocated);
339		else
340			from_free = count;
341
342		from_rsvd = count - from_free;
343
344		if (free - from_free >= reserved)
345			err = 0;
346		else
347			mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
348				  slave, port, resource_str(res_type), free,
349				  from_free, reserved);
350	}
351
352	if (!err) {
353		/* grant the request */
354		if (port > 0) {
355			res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
356			res_alloc->res_port_free[port - 1] -= count;
357			res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
358		} else {
359			res_alloc->allocated[slave] += count;
360			res_alloc->res_free -= count;
361			res_alloc->res_reserved -= from_rsvd;
362		}
363	}
364
365out:
366	spin_unlock(&res_alloc->alloc_lock);
367	return err;
368}
369
370static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
371				    enum mlx4_resource res_type, int count,
372				    int port)
373{
374	struct mlx4_priv *priv = mlx4_priv(dev);
375	struct resource_allocator *res_alloc =
376		&priv->mfunc.master.res_tracker.res_alloc[res_type];
377	int allocated, guaranteed, from_rsvd;
378
379	if (slave > dev->num_vfs)
380		return;
381
382	spin_lock(&res_alloc->alloc_lock);
383
384	allocated = (port > 0) ?
385		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
386		res_alloc->allocated[slave];
387	guaranteed = res_alloc->guaranteed[slave];
388
389	if (allocated - count >= guaranteed) {
390		from_rsvd = 0;
391	} else {
392		/* portion may need to be returned to reserved area */
393		if (allocated - guaranteed > 0)
394			from_rsvd = count - (allocated - guaranteed);
395		else
396			from_rsvd = count;
397	}
398
399	if (port > 0) {
400		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
401		res_alloc->res_port_free[port - 1] += count;
402		res_alloc->res_port_rsvd[port - 1] += from_rsvd;
403	} else {
404		res_alloc->allocated[slave] -= count;
405		res_alloc->res_free += count;
406		res_alloc->res_reserved += from_rsvd;
407	}
408
409	spin_unlock(&res_alloc->alloc_lock);
410	return;
411}
412
413static inline void initialize_res_quotas(struct mlx4_dev *dev,
414					 struct resource_allocator *res_alloc,
415					 enum mlx4_resource res_type,
416					 int vf, int num_instances)
417{
418	res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
419	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
420	if (vf == mlx4_master_func_num(dev)) {
421		res_alloc->res_free = num_instances;
422		if (res_type == RES_MTT) {
423			/* reserved mtts will be taken out of the PF allocation */
424			res_alloc->res_free += dev->caps.reserved_mtts;
425			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
426			res_alloc->quota[vf] += dev->caps.reserved_mtts;
427		}
428	}
429}
430
431void mlx4_init_quotas(struct mlx4_dev *dev)
432{
433	struct mlx4_priv *priv = mlx4_priv(dev);
434	int pf;
435
436	/* quotas for VFs are initialized in mlx4_slave_cap */
437	if (mlx4_is_slave(dev))
438		return;
439
440	if (!mlx4_is_mfunc(dev)) {
441		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
442			mlx4_num_reserved_sqps(dev);
443		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
444		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
445		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
446		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
447		return;
448	}
449
450	pf = mlx4_master_func_num(dev);
451	dev->quotas.qp =
452		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
453	dev->quotas.cq =
454		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
455	dev->quotas.srq =
456		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
457	dev->quotas.mtt =
458		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
459	dev->quotas.mpt =
460		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
461}
462int mlx4_init_resource_tracker(struct mlx4_dev *dev)
463{
464	struct mlx4_priv *priv = mlx4_priv(dev);
465	int i, j;
466	int t;
467
468	priv->mfunc.master.res_tracker.slave_list =
469		kzalloc(dev->num_slaves * sizeof(struct slave_list),
470			GFP_KERNEL);
471	if (!priv->mfunc.master.res_tracker.slave_list)
472		return -ENOMEM;
473
474	for (i = 0 ; i < dev->num_slaves; i++) {
475		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
476			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
477				       slave_list[i].res_list[t]);
478		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
479	}
480
481	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
482		 dev->num_slaves);
483	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
484		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
485
486	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
487		struct resource_allocator *res_alloc =
488			&priv->mfunc.master.res_tracker.res_alloc[i];
489		res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
490		res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
491		if (i == RES_MAC || i == RES_VLAN)
492			res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
493						       (dev->num_vfs + 1) * sizeof(int),
494							GFP_KERNEL);
495		else
496			res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
497
498		if (!res_alloc->quota || !res_alloc->guaranteed ||
499		    !res_alloc->allocated)
500			goto no_mem_err;
501
502		spin_lock_init(&res_alloc->alloc_lock);
503		for (t = 0; t < dev->num_vfs + 1; t++) {
504			struct mlx4_active_ports actv_ports =
505				mlx4_get_active_ports(dev, t);
506			switch (i) {
507			case RES_QP:
508				initialize_res_quotas(dev, res_alloc, RES_QP,
509						      t, dev->caps.num_qps -
510						      dev->caps.reserved_qps -
511						      mlx4_num_reserved_sqps(dev));
512				break;
513			case RES_CQ:
514				initialize_res_quotas(dev, res_alloc, RES_CQ,
515						      t, dev->caps.num_cqs -
516						      dev->caps.reserved_cqs);
517				break;
518			case RES_SRQ:
519				initialize_res_quotas(dev, res_alloc, RES_SRQ,
520						      t, dev->caps.num_srqs -
521						      dev->caps.reserved_srqs);
522				break;
523			case RES_MPT:
524				initialize_res_quotas(dev, res_alloc, RES_MPT,
525						      t, dev->caps.num_mpts -
526						      dev->caps.reserved_mrws);
527				break;
528			case RES_MTT:
529				initialize_res_quotas(dev, res_alloc, RES_MTT,
530						      t, dev->caps.num_mtts -
531						      dev->caps.reserved_mtts);
532				break;
533			case RES_MAC:
534				if (t == mlx4_master_func_num(dev)) {
535					int max_vfs_pport = 0;
536					/* Calculate the max vfs per port for */
537					/* both ports.			      */
538					for (j = 0; j < dev->caps.num_ports;
539					     j++) {
540						struct mlx4_slaves_pport slaves_pport =
541							mlx4_phys_to_slaves_pport(dev, j + 1);
542						unsigned current_slaves =
543							bitmap_weight(slaves_pport.slaves,
544								      dev->caps.num_ports) - 1;
545						if (max_vfs_pport < current_slaves)
546							max_vfs_pport =
547								current_slaves;
548					}
549					res_alloc->quota[t] =
550						MLX4_MAX_MAC_NUM -
551						2 * max_vfs_pport;
552					res_alloc->guaranteed[t] = 2;
553					for (j = 0; j < MLX4_MAX_PORTS; j++)
554						res_alloc->res_port_free[j] =
555							MLX4_MAX_MAC_NUM;
556				} else {
557					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
558					res_alloc->guaranteed[t] = 2;
559				}
560				break;
561			case RES_VLAN:
562				if (t == mlx4_master_func_num(dev)) {
563					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
564					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
565					for (j = 0; j < MLX4_MAX_PORTS; j++)
566						res_alloc->res_port_free[j] =
567							res_alloc->quota[t];
568				} else {
569					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
570					res_alloc->guaranteed[t] = 0;
571				}
572				break;
573			case RES_COUNTER:
574				res_alloc->quota[t] = dev->caps.max_counters;
575				res_alloc->guaranteed[t] = 0;
576				if (t == mlx4_master_func_num(dev))
577					res_alloc->res_free = res_alloc->quota[t];
578				break;
579			default:
580				break;
581			}
582			if (i == RES_MAC || i == RES_VLAN) {
583				for (j = 0; j < dev->caps.num_ports; j++)
584					if (test_bit(j, actv_ports.ports))
585						res_alloc->res_port_rsvd[j] +=
586							res_alloc->guaranteed[t];
587			} else {
588				res_alloc->res_reserved += res_alloc->guaranteed[t];
589			}
590		}
591	}
592	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
593	return 0;
594
595no_mem_err:
596	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
597		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
598		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
599		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
600		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
601		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
602		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
603	}
604	return -ENOMEM;
605}
606
607void mlx4_free_resource_tracker(struct mlx4_dev *dev,
608				enum mlx4_res_tracker_free_type type)
609{
610	struct mlx4_priv *priv = mlx4_priv(dev);
611	int i;
612
613	if (priv->mfunc.master.res_tracker.slave_list) {
614		if (type != RES_TR_FREE_STRUCTS_ONLY) {
615			for (i = 0; i < dev->num_slaves; i++) {
616				if (type == RES_TR_FREE_ALL ||
617				    dev->caps.function != i)
618					mlx4_delete_all_resources_for_slave(dev, i);
619			}
620			/* free master's vlans */
621			i = dev->caps.function;
622			mlx4_reset_roce_gids(dev, i);
623			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
624			rem_slave_vlans(dev, i);
625			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
626		}
627
628		if (type != RES_TR_FREE_SLAVES_ONLY) {
629			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
630				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
631				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
632				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
633				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
634				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
635				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
636			}
637			kfree(priv->mfunc.master.res_tracker.slave_list);
638			priv->mfunc.master.res_tracker.slave_list = NULL;
639		}
640	}
641}
642
643static void update_pkey_index(struct mlx4_dev *dev, int slave,
644			      struct mlx4_cmd_mailbox *inbox)
645{
646	u8 sched = *(u8 *)(inbox->buf + 64);
647	u8 orig_index = *(u8 *)(inbox->buf + 35);
648	u8 new_index;
649	struct mlx4_priv *priv = mlx4_priv(dev);
650	int port;
651
652	port = (sched >> 6 & 1) + 1;
653
654	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
655	*(u8 *)(inbox->buf + 35) = new_index;
656}
657
658static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
659		       u8 slave)
660{
661	struct mlx4_qp_context	*qp_ctx = inbox->buf + 8;
662	enum mlx4_qp_optpar	optpar = be32_to_cpu(*(__be32 *) inbox->buf);
663	u32			ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
664	int port;
665
666	if (MLX4_QP_ST_UD == ts) {
667		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
668		if (mlx4_is_eth(dev, port))
669			qp_ctx->pri_path.mgid_index =
670				mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
671		else
672			qp_ctx->pri_path.mgid_index = slave | 0x80;
673
674	} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
675		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
676			port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
677			if (mlx4_is_eth(dev, port)) {
678				qp_ctx->pri_path.mgid_index +=
679					mlx4_get_base_gid_ix(dev, slave, port);
680				qp_ctx->pri_path.mgid_index &= 0x7f;
681			} else {
682				qp_ctx->pri_path.mgid_index = slave & 0x7F;
683			}
684		}
685		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
686			port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
687			if (mlx4_is_eth(dev, port)) {
688				qp_ctx->alt_path.mgid_index +=
689					mlx4_get_base_gid_ix(dev, slave, port);
690				qp_ctx->alt_path.mgid_index &= 0x7f;
691			} else {
692				qp_ctx->alt_path.mgid_index = slave & 0x7F;
693			}
694		}
695	}
696}
697
698static int update_vport_qp_param(struct mlx4_dev *dev,
699				 struct mlx4_cmd_mailbox *inbox,
700				 u8 slave, u32 qpn)
701{
702	struct mlx4_qp_context	*qpc = inbox->buf + 8;
703	struct mlx4_vport_oper_state *vp_oper;
704	struct mlx4_priv *priv;
705	u32 qp_type;
706	int port;
707
708	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
709	priv = mlx4_priv(dev);
710	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
711	qp_type	= (be32_to_cpu(qpc->flags) >> 16) & 0xff;
712
713	if (MLX4_VGT != vp_oper->state.default_vlan) {
714		/* the reserved QPs (special, proxy, tunnel)
715		 * do not operate over vlans
716		 */
717		if (mlx4_is_qp_reserved(dev, qpn))
718			return 0;
719
720		/* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
721		if (qp_type == MLX4_QP_ST_UD ||
722		    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
723			if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
724				*(__be32 *)inbox->buf =
725					cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
726					MLX4_QP_OPTPAR_VLAN_STRIPPING);
727				qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
728			} else {
729				struct mlx4_update_qp_params params = {.flags = 0};
730
731				mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
732			}
733		}
734
735		if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
736		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
737			qpc->pri_path.vlan_control =
738				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
739				MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
740				MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
741				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
742				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
743				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
744		} else if (0 != vp_oper->state.default_vlan) {
745			qpc->pri_path.vlan_control =
746				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
747				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
748				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
749		} else { /* priority tagged */
750			qpc->pri_path.vlan_control =
751				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
752				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
753		}
754
755		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
756		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
757		qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
758		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
759		qpc->pri_path.sched_queue &= 0xC7;
760		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
761	}
762	if (vp_oper->state.spoofchk) {
763		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
764		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
765	}
766	return 0;
767}
768
769static int mpt_mask(struct mlx4_dev *dev)
770{
771	return dev->caps.num_mpts - 1;
772}
773
774static void *find_res(struct mlx4_dev *dev, u64 res_id,
775		      enum mlx4_resource type)
776{
777	struct mlx4_priv *priv = mlx4_priv(dev);
778
779	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
780				  res_id);
781}
782
783static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
784		   enum mlx4_resource type,
785		   void *res)
786{
787	struct res_common *r;
788	int err = 0;
789
790	spin_lock_irq(mlx4_tlock(dev));
791	r = find_res(dev, res_id, type);
792	if (!r) {
793		err = -ENONET;
794		goto exit;
795	}
796
797	if (r->state == RES_ANY_BUSY) {
798		err = -EBUSY;
799		goto exit;
800	}
801
802	if (r->owner != slave) {
803		err = -EPERM;
804		goto exit;
805	}
806
807	r->from_state = r->state;
808	r->state = RES_ANY_BUSY;
809
810	if (res)
811		*((struct res_common **)res) = r;
812
813exit:
814	spin_unlock_irq(mlx4_tlock(dev));
815	return err;
816}
817
818int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
819				    enum mlx4_resource type,
820				    u64 res_id, int *slave)
821{
822
823	struct res_common *r;
824	int err = -ENOENT;
825	int id = res_id;
826
827	if (type == RES_QP)
828		id &= 0x7fffff;
829	spin_lock(mlx4_tlock(dev));
830
831	r = find_res(dev, id, type);
832	if (r) {
833		*slave = r->owner;
834		err = 0;
835	}
836	spin_unlock(mlx4_tlock(dev));
837
838	return err;
839}
840
841static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
842		    enum mlx4_resource type)
843{
844	struct res_common *r;
845
846	spin_lock_irq(mlx4_tlock(dev));
847	r = find_res(dev, res_id, type);
848	if (r)
849		r->state = r->from_state;
850	spin_unlock_irq(mlx4_tlock(dev));
851}
852
853static struct res_common *alloc_qp_tr(int id)
854{
855	struct res_qp *ret;
856
857	ret = kzalloc(sizeof *ret, GFP_KERNEL);
858	if (!ret)
859		return NULL;
860
861	ret->com.res_id = id;
862	ret->com.state = RES_QP_RESERVED;
863	ret->local_qpn = id;
864	INIT_LIST_HEAD(&ret->mcg_list);
865	spin_lock_init(&ret->mcg_spl);
866	atomic_set(&ret->ref_count, 0);
867
868	return &ret->com;
869}
870
871static struct res_common *alloc_mtt_tr(int id, int order)
872{
873	struct res_mtt *ret;
874
875	ret = kzalloc(sizeof *ret, GFP_KERNEL);
876	if (!ret)
877		return NULL;
878
879	ret->com.res_id = id;
880	ret->order = order;
881	ret->com.state = RES_MTT_ALLOCATED;
882	atomic_set(&ret->ref_count, 0);
883
884	return &ret->com;
885}
886
887static struct res_common *alloc_mpt_tr(int id, int key)
888{
889	struct res_mpt *ret;
890
891	ret = kzalloc(sizeof *ret, GFP_KERNEL);
892	if (!ret)
893		return NULL;
894
895	ret->com.res_id = id;
896	ret->com.state = RES_MPT_RESERVED;
897	ret->key = key;
898
899	return &ret->com;
900}
901
902static struct res_common *alloc_eq_tr(int id)
903{
904	struct res_eq *ret;
905
906	ret = kzalloc(sizeof *ret, GFP_KERNEL);
907	if (!ret)
908		return NULL;
909
910	ret->com.res_id = id;
911	ret->com.state = RES_EQ_RESERVED;
912
913	return &ret->com;
914}
915
916static struct res_common *alloc_cq_tr(int id)
917{
918	struct res_cq *ret;
919
920	ret = kzalloc(sizeof *ret, GFP_KERNEL);
921	if (!ret)
922		return NULL;
923
924	ret->com.res_id = id;
925	ret->com.state = RES_CQ_ALLOCATED;
926	atomic_set(&ret->ref_count, 0);
927
928	return &ret->com;
929}
930
931static struct res_common *alloc_srq_tr(int id)
932{
933	struct res_srq *ret;
934
935	ret = kzalloc(sizeof *ret, GFP_KERNEL);
936	if (!ret)
937		return NULL;
938
939	ret->com.res_id = id;
940	ret->com.state = RES_SRQ_ALLOCATED;
941	atomic_set(&ret->ref_count, 0);
942
943	return &ret->com;
944}
945
946static struct res_common *alloc_counter_tr(int id)
947{
948	struct res_counter *ret;
949
950	ret = kzalloc(sizeof *ret, GFP_KERNEL);
951	if (!ret)
952		return NULL;
953
954	ret->com.res_id = id;
955	ret->com.state = RES_COUNTER_ALLOCATED;
956
957	return &ret->com;
958}
959
960static struct res_common *alloc_xrcdn_tr(int id)
961{
962	struct res_xrcdn *ret;
963
964	ret = kzalloc(sizeof *ret, GFP_KERNEL);
965	if (!ret)
966		return NULL;
967
968	ret->com.res_id = id;
969	ret->com.state = RES_XRCD_ALLOCATED;
970
971	return &ret->com;
972}
973
974static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
975{
976	struct res_fs_rule *ret;
977
978	ret = kzalloc(sizeof *ret, GFP_KERNEL);
979	if (!ret)
980		return NULL;
981
982	ret->com.res_id = id;
983	ret->com.state = RES_FS_RULE_ALLOCATED;
984	ret->qpn = qpn;
985	return &ret->com;
986}
987
988static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
989				   int extra)
990{
991	struct res_common *ret;
992
993	switch (type) {
994	case RES_QP:
995		ret = alloc_qp_tr(id);
996		break;
997	case RES_MPT:
998		ret = alloc_mpt_tr(id, extra);
999		break;
1000	case RES_MTT:
1001		ret = alloc_mtt_tr(id, extra);
1002		break;
1003	case RES_EQ:
1004		ret = alloc_eq_tr(id);
1005		break;
1006	case RES_CQ:
1007		ret = alloc_cq_tr(id);
1008		break;
1009	case RES_SRQ:
1010		ret = alloc_srq_tr(id);
1011		break;
1012	case RES_MAC:
1013		pr_err("implementation missing\n");
1014		return NULL;
1015	case RES_COUNTER:
1016		ret = alloc_counter_tr(id);
1017		break;
1018	case RES_XRCD:
1019		ret = alloc_xrcdn_tr(id);
1020		break;
1021	case RES_FS_RULE:
1022		ret = alloc_fs_rule_tr(id, extra);
1023		break;
1024	default:
1025		return NULL;
1026	}
1027	if (ret)
1028		ret->owner = slave;
1029
1030	return ret;
1031}
1032
1033static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1034			 enum mlx4_resource type, int extra)
1035{
1036	int i;
1037	int err;
1038	struct mlx4_priv *priv = mlx4_priv(dev);
1039	struct res_common **res_arr;
1040	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1041	struct rb_root *root = &tracker->res_tree[type];
1042
1043	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
1044	if (!res_arr)
1045		return -ENOMEM;
1046
1047	for (i = 0; i < count; ++i) {
1048		res_arr[i] = alloc_tr(base + i, type, slave, extra);
1049		if (!res_arr[i]) {
1050			for (--i; i >= 0; --i)
1051				kfree(res_arr[i]);
1052
1053			kfree(res_arr);
1054			return -ENOMEM;
1055		}
1056	}
1057
1058	spin_lock_irq(mlx4_tlock(dev));
1059	for (i = 0; i < count; ++i) {
1060		if (find_res(dev, base + i, type)) {
1061			err = -EEXIST;
1062			goto undo;
1063		}
1064		err = res_tracker_insert(root, res_arr[i]);
1065		if (err)
1066			goto undo;
1067		list_add_tail(&res_arr[i]->list,
1068			      &tracker->slave_list[slave].res_list[type]);
1069	}
1070	spin_unlock_irq(mlx4_tlock(dev));
1071	kfree(res_arr);
1072
1073	return 0;
1074
1075undo:
1076	for (--i; i >= base; --i)
1077		rb_erase(&res_arr[i]->node, root);
1078
1079	spin_unlock_irq(mlx4_tlock(dev));
1080
1081	for (i = 0; i < count; ++i)
1082		kfree(res_arr[i]);
1083
1084	kfree(res_arr);
1085
1086	return err;
1087}
1088
1089static int remove_qp_ok(struct res_qp *res)
1090{
1091	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
1092	    !list_empty(&res->mcg_list)) {
1093		pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
1094		       res->com.state, atomic_read(&res->ref_count));
1095		return -EBUSY;
1096	} else if (res->com.state != RES_QP_RESERVED) {
1097		return -EPERM;
1098	}
1099
1100	return 0;
1101}
1102
1103static int remove_mtt_ok(struct res_mtt *res, int order)
1104{
1105	if (res->com.state == RES_MTT_BUSY ||
1106	    atomic_read(&res->ref_count)) {
1107		pr_devel("%s-%d: state %s, ref_count %d\n",
1108			 __func__, __LINE__,
1109			 mtt_states_str(res->com.state),
1110			 atomic_read(&res->ref_count));
1111		return -EBUSY;
1112	} else if (res->com.state != RES_MTT_ALLOCATED)
1113		return -EPERM;
1114	else if (res->order != order)
1115		return -EINVAL;
1116
1117	return 0;
1118}
1119
1120static int remove_mpt_ok(struct res_mpt *res)
1121{
1122	if (res->com.state == RES_MPT_BUSY)
1123		return -EBUSY;
1124	else if (res->com.state != RES_MPT_RESERVED)
1125		return -EPERM;
1126
1127	return 0;
1128}
1129
1130static int remove_eq_ok(struct res_eq *res)
1131{
1132	if (res->com.state == RES_MPT_BUSY)
1133		return -EBUSY;
1134	else if (res->com.state != RES_MPT_RESERVED)
1135		return -EPERM;
1136
1137	return 0;
1138}
1139
1140static int remove_counter_ok(struct res_counter *res)
1141{
1142	if (res->com.state == RES_COUNTER_BUSY)
1143		return -EBUSY;
1144	else if (res->com.state != RES_COUNTER_ALLOCATED)
1145		return -EPERM;
1146
1147	return 0;
1148}
1149
1150static int remove_xrcdn_ok(struct res_xrcdn *res)
1151{
1152	if (res->com.state == RES_XRCD_BUSY)
1153		return -EBUSY;
1154	else if (res->com.state != RES_XRCD_ALLOCATED)
1155		return -EPERM;
1156
1157	return 0;
1158}
1159
1160static int remove_fs_rule_ok(struct res_fs_rule *res)
1161{
1162	if (res->com.state == RES_FS_RULE_BUSY)
1163		return -EBUSY;
1164	else if (res->com.state != RES_FS_RULE_ALLOCATED)
1165		return -EPERM;
1166
1167	return 0;
1168}
1169
1170static int remove_cq_ok(struct res_cq *res)
1171{
1172	if (res->com.state == RES_CQ_BUSY)
1173		return -EBUSY;
1174	else if (res->com.state != RES_CQ_ALLOCATED)
1175		return -EPERM;
1176
1177	return 0;
1178}
1179
1180static int remove_srq_ok(struct res_srq *res)
1181{
1182	if (res->com.state == RES_SRQ_BUSY)
1183		return -EBUSY;
1184	else if (res->com.state != RES_SRQ_ALLOCATED)
1185		return -EPERM;
1186
1187	return 0;
1188}
1189
1190static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
1191{
1192	switch (type) {
1193	case RES_QP:
1194		return remove_qp_ok((struct res_qp *)res);
1195	case RES_CQ:
1196		return remove_cq_ok((struct res_cq *)res);
1197	case RES_SRQ:
1198		return remove_srq_ok((struct res_srq *)res);
1199	case RES_MPT:
1200		return remove_mpt_ok((struct res_mpt *)res);
1201	case RES_MTT:
1202		return remove_mtt_ok((struct res_mtt *)res, extra);
1203	case RES_MAC:
1204		return -ENOSYS;
1205	case RES_EQ:
1206		return remove_eq_ok((struct res_eq *)res);
1207	case RES_COUNTER:
1208		return remove_counter_ok((struct res_counter *)res);
1209	case RES_XRCD:
1210		return remove_xrcdn_ok((struct res_xrcdn *)res);
1211	case RES_FS_RULE:
1212		return remove_fs_rule_ok((struct res_fs_rule *)res);
1213	default:
1214		return -EINVAL;
1215	}
1216}
1217
1218static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1219			 enum mlx4_resource type, int extra)
1220{
1221	u64 i;
1222	int err;
1223	struct mlx4_priv *priv = mlx4_priv(dev);
1224	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1225	struct res_common *r;
1226
1227	spin_lock_irq(mlx4_tlock(dev));
1228	for (i = base; i < base + count; ++i) {
1229		r = res_tracker_lookup(&tracker->res_tree[type], i);
1230		if (!r) {
1231			err = -ENOENT;
1232			goto out;
1233		}
1234		if (r->owner != slave) {
1235			err = -EPERM;
1236			goto out;
1237		}
1238		err = remove_ok(r, type, extra);
1239		if (err)
1240			goto out;
1241	}
1242
1243	for (i = base; i < base + count; ++i) {
1244		r = res_tracker_lookup(&tracker->res_tree[type], i);
1245		rb_erase(&r->node, &tracker->res_tree[type]);
1246		list_del(&r->list);
1247		kfree(r);
1248	}
1249	err = 0;
1250
1251out:
1252	spin_unlock_irq(mlx4_tlock(dev));
1253
1254	return err;
1255}
1256
1257static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
1258				enum res_qp_states state, struct res_qp **qp,
1259				int alloc)
1260{
1261	struct mlx4_priv *priv = mlx4_priv(dev);
1262	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1263	struct res_qp *r;
1264	int err = 0;
1265
1266	spin_lock_irq(mlx4_tlock(dev));
1267	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
1268	if (!r)
1269		err = -ENOENT;
1270	else if (r->com.owner != slave)
1271		err = -EPERM;
1272	else {
1273		switch (state) {
1274		case RES_QP_BUSY:
1275			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
1276				 __func__, r->com.res_id);
1277			err = -EBUSY;
1278			break;
1279
1280		case RES_QP_RESERVED:
1281			if (r->com.state == RES_QP_MAPPED && !alloc)
1282				break;
1283
1284			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
1285			err = -EINVAL;
1286			break;
1287
1288		case RES_QP_MAPPED:
1289			if ((r->com.state == RES_QP_RESERVED && alloc) ||
1290			    r->com.state == RES_QP_HW)
1291				break;
1292			else {
1293				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
1294					  r->com.res_id);
1295				err = -EINVAL;
1296			}
1297
1298			break;
1299
1300		case RES_QP_HW:
1301			if (r->com.state != RES_QP_MAPPED)
1302				err = -EINVAL;
1303			break;
1304		default:
1305			err = -EINVAL;
1306		}
1307
1308		if (!err) {
1309			r->com.from_state = r->com.state;
1310			r->com.to_state = state;
1311			r->com.state = RES_QP_BUSY;
1312			if (qp)
1313				*qp = r;
1314		}
1315	}
1316
1317	spin_unlock_irq(mlx4_tlock(dev));
1318
1319	return err;
1320}
1321
1322static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1323				enum res_mpt_states state, struct res_mpt **mpt)
1324{
1325	struct mlx4_priv *priv = mlx4_priv(dev);
1326	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1327	struct res_mpt *r;
1328	int err = 0;
1329
1330	spin_lock_irq(mlx4_tlock(dev));
1331	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1332	if (!r)
1333		err = -ENOENT;
1334	else if (r->com.owner != slave)
1335		err = -EPERM;
1336	else {
1337		switch (state) {
1338		case RES_MPT_BUSY:
1339			err = -EINVAL;
1340			break;
1341
1342		case RES_MPT_RESERVED:
1343			if (r->com.state != RES_MPT_MAPPED)
1344				err = -EINVAL;
1345			break;
1346
1347		case RES_MPT_MAPPED:
1348			if (r->com.state != RES_MPT_RESERVED &&
1349			    r->com.state != RES_MPT_HW)
1350				err = -EINVAL;
1351			break;
1352
1353		case RES_MPT_HW:
1354			if (r->com.state != RES_MPT_MAPPED)
1355				err = -EINVAL;
1356			break;
1357		default:
1358			err = -EINVAL;
1359		}
1360
1361		if (!err) {
1362			r->com.from_state = r->com.state;
1363			r->com.to_state = state;
1364			r->com.state = RES_MPT_BUSY;
1365			if (mpt)
1366				*mpt = r;
1367		}
1368	}
1369
1370	spin_unlock_irq(mlx4_tlock(dev));
1371
1372	return err;
1373}
1374
1375static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1376				enum res_eq_states state, struct res_eq **eq)
1377{
1378	struct mlx4_priv *priv = mlx4_priv(dev);
1379	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1380	struct res_eq *r;
1381	int err = 0;
1382
1383	spin_lock_irq(mlx4_tlock(dev));
1384	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1385	if (!r)
1386		err = -ENOENT;
1387	else if (r->com.owner != slave)
1388		err = -EPERM;
1389	else {
1390		switch (state) {
1391		case RES_EQ_BUSY:
1392			err = -EINVAL;
1393			break;
1394
1395		case RES_EQ_RESERVED:
1396			if (r->com.state != RES_EQ_HW)
1397				err = -EINVAL;
1398			break;
1399
1400		case RES_EQ_HW:
1401			if (r->com.state != RES_EQ_RESERVED)
1402				err = -EINVAL;
1403			break;
1404
1405		default:
1406			err = -EINVAL;
1407		}
1408
1409		if (!err) {
1410			r->com.from_state = r->com.state;
1411			r->com.to_state = state;
1412			r->com.state = RES_EQ_BUSY;
1413			if (eq)
1414				*eq = r;
1415		}
1416	}
1417
1418	spin_unlock_irq(mlx4_tlock(dev));
1419
1420	return err;
1421}
1422
1423static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1424				enum res_cq_states state, struct res_cq **cq)
1425{
1426	struct mlx4_priv *priv = mlx4_priv(dev);
1427	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1428	struct res_cq *r;
1429	int err;
1430
1431	spin_lock_irq(mlx4_tlock(dev));
1432	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1433	if (!r) {
1434		err = -ENOENT;
1435	} else if (r->com.owner != slave) {
1436		err = -EPERM;
1437	} else if (state == RES_CQ_ALLOCATED) {
1438		if (r->com.state != RES_CQ_HW)
1439			err = -EINVAL;
1440		else if (atomic_read(&r->ref_count))
1441			err = -EBUSY;
1442		else
1443			err = 0;
1444	} else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
1445		err = -EINVAL;
1446	} else {
1447		err = 0;
1448	}
1449
1450	if (!err) {
1451		r->com.from_state = r->com.state;
1452		r->com.to_state = state;
1453		r->com.state = RES_CQ_BUSY;
1454		if (cq)
1455			*cq = r;
1456	}
1457
1458	spin_unlock_irq(mlx4_tlock(dev));
1459
1460	return err;
1461}
1462
1463static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1464				 enum res_srq_states state, struct res_srq **srq)
1465{
1466	struct mlx4_priv *priv = mlx4_priv(dev);
1467	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1468	struct res_srq *r;
1469	int err = 0;
1470
1471	spin_lock_irq(mlx4_tlock(dev));
1472	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1473	if (!r) {
1474		err = -ENOENT;
1475	} else if (r->com.owner != slave) {
1476		err = -EPERM;
1477	} else if (state == RES_SRQ_ALLOCATED) {
1478		if (r->com.state != RES_SRQ_HW)
1479			err = -EINVAL;
1480		else if (atomic_read(&r->ref_count))
1481			err = -EBUSY;
1482	} else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
1483		err = -EINVAL;
1484	}
1485
1486	if (!err) {
1487		r->com.from_state = r->com.state;
1488		r->com.to_state = state;
1489		r->com.state = RES_SRQ_BUSY;
1490		if (srq)
1491			*srq = r;
1492	}
1493
1494	spin_unlock_irq(mlx4_tlock(dev));
1495
1496	return err;
1497}
1498
1499static void res_abort_move(struct mlx4_dev *dev, int slave,
1500			   enum mlx4_resource type, int id)
1501{
1502	struct mlx4_priv *priv = mlx4_priv(dev);
1503	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1504	struct res_common *r;
1505
1506	spin_lock_irq(mlx4_tlock(dev));
1507	r = res_tracker_lookup(&tracker->res_tree[type], id);
1508	if (r && (r->owner == slave))
1509		r->state = r->from_state;
1510	spin_unlock_irq(mlx4_tlock(dev));
1511}
1512
1513static void res_end_move(struct mlx4_dev *dev, int slave,
1514			 enum mlx4_resource type, int id)
1515{
1516	struct mlx4_priv *priv = mlx4_priv(dev);
1517	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1518	struct res_common *r;
1519
1520	spin_lock_irq(mlx4_tlock(dev));
1521	r = res_tracker_lookup(&tracker->res_tree[type], id);
1522	if (r && (r->owner == slave))
1523		r->state = r->to_state;
1524	spin_unlock_irq(mlx4_tlock(dev));
1525}
1526
1527static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1528{
1529	return mlx4_is_qp_reserved(dev, qpn) &&
1530		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1531}
1532
1533static int fw_reserved(struct mlx4_dev *dev, int qpn)
1534{
1535	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1536}
1537
1538static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1539			u64 in_param, u64 *out_param)
1540{
1541	int err;
1542	int count;
1543	int align;
1544	int base;
1545	int qpn;
1546
1547	switch (op) {
1548	case RES_OP_RESERVE:
1549		count = get_param_l(&in_param) & 0xffffff;
1550		align = get_param_h(&in_param);
1551		err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1552		if (err)
1553			return err;
1554
1555		err = __mlx4_qp_reserve_range(dev, count, align, &base);
1556		if (err) {
1557			mlx4_release_resource(dev, slave, RES_QP, count, 0);
1558			return err;
1559		}
1560
1561		err = add_res_range(dev, slave, base, count, RES_QP, 0);
1562		if (err) {
1563			mlx4_release_resource(dev, slave, RES_QP, count, 0);
1564			__mlx4_qp_release_range(dev, base, count);
1565			return err;
1566		}
1567		set_param_l(out_param, base);
1568		break;
1569	case RES_OP_MAP_ICM:
1570		qpn = get_param_l(&in_param) & 0x7fffff;
1571		if (valid_reserved(dev, slave, qpn)) {
1572			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1573			if (err)
1574				return err;
1575		}
1576
1577		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1578					   NULL, 1);
1579		if (err)
1580			return err;
1581
1582		if (!fw_reserved(dev, qpn)) {
1583			err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
1584			if (err) {
1585				res_abort_move(dev, slave, RES_QP, qpn);
1586				return err;
1587			}
1588		}
1589
1590		res_end_move(dev, slave, RES_QP, qpn);
1591		break;
1592
1593	default:
1594		err = -EINVAL;
1595		break;
1596	}
1597	return err;
1598}
1599
1600static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1601			 u64 in_param, u64 *out_param)
1602{
1603	int err = -EINVAL;
1604	int base;
1605	int order;
1606
1607	if (op != RES_OP_RESERVE_AND_MAP)
1608		return err;
1609
1610	order = get_param_l(&in_param);
1611
1612	err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1613	if (err)
1614		return err;
1615
1616	base = __mlx4_alloc_mtt_range(dev, order);
1617	if (base == -1) {
1618		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1619		return -ENOMEM;
1620	}
1621
1622	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1623	if (err) {
1624		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1625		__mlx4_free_mtt_range(dev, base, order);
1626	} else {
1627		set_param_l(out_param, base);
1628	}
1629
1630	return err;
1631}
1632
1633static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1634			 u64 in_param, u64 *out_param)
1635{
1636	int err = -EINVAL;
1637	int index;
1638	int id;
1639	struct res_mpt *mpt;
1640
1641	switch (op) {
1642	case RES_OP_RESERVE:
1643		err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1644		if (err)
1645			break;
1646
1647		index = __mlx4_mpt_reserve(dev);
1648		if (index == -1) {
1649			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1650			break;
1651		}
1652		id = index & mpt_mask(dev);
1653
1654		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1655		if (err) {
1656			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1657			__mlx4_mpt_release(dev, index);
1658			break;
1659		}
1660		set_param_l(out_param, index);
1661		break;
1662	case RES_OP_MAP_ICM:
1663		index = get_param_l(&in_param);
1664		id = index & mpt_mask(dev);
1665		err = mr_res_start_move_to(dev, slave, id,
1666					   RES_MPT_MAPPED, &mpt);
1667		if (err)
1668			return err;
1669
1670		err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
1671		if (err) {
1672			res_abort_move(dev, slave, RES_MPT, id);
1673			return err;
1674		}
1675
1676		res_end_move(dev, slave, RES_MPT, id);
1677		break;
1678	}
1679	return err;
1680}
1681
1682static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1683			u64 in_param, u64 *out_param)
1684{
1685	int cqn;
1686	int err;
1687
1688	switch (op) {
1689	case RES_OP_RESERVE_AND_MAP:
1690		err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1691		if (err)
1692			break;
1693
1694		err = __mlx4_cq_alloc_icm(dev, &cqn);
1695		if (err) {
1696			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1697			break;
1698		}
1699
1700		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1701		if (err) {
1702			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1703			__mlx4_cq_free_icm(dev, cqn);
1704			break;
1705		}
1706
1707		set_param_l(out_param, cqn);
1708		break;
1709
1710	default:
1711		err = -EINVAL;
1712	}
1713
1714	return err;
1715}
1716
1717static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1718			 u64 in_param, u64 *out_param)
1719{
1720	int srqn;
1721	int err;
1722
1723	switch (op) {
1724	case RES_OP_RESERVE_AND_MAP:
1725		err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1726		if (err)
1727			break;
1728
1729		err = __mlx4_srq_alloc_icm(dev, &srqn);
1730		if (err) {
1731			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1732			break;
1733		}
1734
1735		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1736		if (err) {
1737			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1738			__mlx4_srq_free_icm(dev, srqn);
1739			break;
1740		}
1741
1742		set_param_l(out_param, srqn);
1743		break;
1744
1745	default:
1746		err = -EINVAL;
1747	}
1748
1749	return err;
1750}
1751
1752static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1753				     u8 smac_index, u64 *mac)
1754{
1755	struct mlx4_priv *priv = mlx4_priv(dev);
1756	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1757	struct list_head *mac_list =
1758		&tracker->slave_list[slave].res_list[RES_MAC];
1759	struct mac_res *res, *tmp;
1760
1761	list_for_each_entry_safe(res, tmp, mac_list, list) {
1762		if (res->smac_index == smac_index && res->port == (u8) port) {
1763			*mac = res->mac;
1764			return 0;
1765		}
1766	}
1767	return -ENOENT;
1768}
1769
1770static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
1771{
1772	struct mlx4_priv *priv = mlx4_priv(dev);
1773	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1774	struct list_head *mac_list =
1775		&tracker->slave_list[slave].res_list[RES_MAC];
1776	struct mac_res *res, *tmp;
1777
1778	list_for_each_entry_safe(res, tmp, mac_list, list) {
1779		if (res->mac == mac && res->port == (u8) port) {
1780			/* mac found. update ref count */
1781			++res->ref_count;
1782			return 0;
1783		}
1784	}
1785
1786	if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1787		return -EINVAL;
1788	res = kzalloc(sizeof *res, GFP_KERNEL);
1789	if (!res) {
1790		mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1791		return -ENOMEM;
1792	}
1793	res->mac = mac;
1794	res->port = (u8) port;
1795	res->smac_index = smac_index;
1796	res->ref_count = 1;
1797	list_add_tail(&res->list,
1798		      &tracker->slave_list[slave].res_list[RES_MAC]);
1799	return 0;
1800}
1801
1802static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1803			       int port)
1804{
1805	struct mlx4_priv *priv = mlx4_priv(dev);
1806	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1807	struct list_head *mac_list =
1808		&tracker->slave_list[slave].res_list[RES_MAC];
1809	struct mac_res *res, *tmp;
1810
1811	list_for_each_entry_safe(res, tmp, mac_list, list) {
1812		if (res->mac == mac && res->port == (u8) port) {
1813			if (!--res->ref_count) {
1814				list_del(&res->list);
1815				mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1816				kfree(res);
1817			}
1818			break;
1819		}
1820	}
1821}
1822
1823static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1824{
1825	struct mlx4_priv *priv = mlx4_priv(dev);
1826	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1827	struct list_head *mac_list =
1828		&tracker->slave_list[slave].res_list[RES_MAC];
1829	struct mac_res *res, *tmp;
1830	int i;
1831
1832	list_for_each_entry_safe(res, tmp, mac_list, list) {
1833		list_del(&res->list);
1834		/* dereference the mac the num times the slave referenced it */
1835		for (i = 0; i < res->ref_count; i++)
1836			__mlx4_unregister_mac(dev, res->port, res->mac);
1837		mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
1838		kfree(res);
1839	}
1840}
1841
1842static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1843			 u64 in_param, u64 *out_param, int in_port)
1844{
1845	int err = -EINVAL;
1846	int port;
1847	u64 mac;
1848	u8 smac_index;
1849
1850	if (op != RES_OP_RESERVE_AND_MAP)
1851		return err;
1852
1853	port = !in_port ? get_param_l(out_param) : in_port;
1854	port = mlx4_slave_convert_port(
1855			dev, slave, port);
1856
1857	if (port < 0)
1858		return -EINVAL;
1859	mac = in_param;
1860
1861	err = __mlx4_register_mac(dev, port, mac);
1862	if (err >= 0) {
1863		smac_index = err;
1864		set_param_l(out_param, err);
1865		err = 0;
1866	}
1867
1868	if (!err) {
1869		err = mac_add_to_slave(dev, slave, mac, port, smac_index);
1870		if (err)
1871			__mlx4_unregister_mac(dev, port, mac);
1872	}
1873	return err;
1874}
1875
1876static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1877			     int port, int vlan_index)
1878{
1879	struct mlx4_priv *priv = mlx4_priv(dev);
1880	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1881	struct list_head *vlan_list =
1882		&tracker->slave_list[slave].res_list[RES_VLAN];
1883	struct vlan_res *res, *tmp;
1884
1885	list_for_each_entry_safe(res, tmp, vlan_list, list) {
1886		if (res->vlan == vlan && res->port == (u8) port) {
1887			/* vlan found. update ref count */
1888			++res->ref_count;
1889			return 0;
1890		}
1891	}
1892
1893	if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
1894		return -EINVAL;
1895	res = kzalloc(sizeof(*res), GFP_KERNEL);
1896	if (!res) {
1897		mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
1898		return -ENOMEM;
1899	}
1900	res->vlan = vlan;
1901	res->port = (u8) port;
1902	res->vlan_index = vlan_index;
1903	res->ref_count = 1;
1904	list_add_tail(&res->list,
1905		      &tracker->slave_list[slave].res_list[RES_VLAN]);
1906	return 0;
1907}
1908
1909
1910static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1911				int port)
1912{
1913	struct mlx4_priv *priv = mlx4_priv(dev);
1914	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1915	struct list_head *vlan_list =
1916		&tracker->slave_list[slave].res_list[RES_VLAN];
1917	struct vlan_res *res, *tmp;
1918
1919	list_for_each_entry_safe(res, tmp, vlan_list, list) {
1920		if (res->vlan == vlan && res->port == (u8) port) {
1921			if (!--res->ref_count) {
1922				list_del(&res->list);
1923				mlx4_release_resource(dev, slave, RES_VLAN,
1924						      1, port);
1925				kfree(res);
1926			}
1927			break;
1928		}
1929	}
1930}
1931
1932static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
1933{
1934	struct mlx4_priv *priv = mlx4_priv(dev);
1935	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1936	struct list_head *vlan_list =
1937		&tracker->slave_list[slave].res_list[RES_VLAN];
1938	struct vlan_res *res, *tmp;
1939	int i;
1940
1941	list_for_each_entry_safe(res, tmp, vlan_list, list) {
1942		list_del(&res->list);
1943		/* dereference the vlan the num times the slave referenced it */
1944		for (i = 0; i < res->ref_count; i++)
1945			__mlx4_unregister_vlan(dev, res->port, res->vlan);
1946		mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
1947		kfree(res);
1948	}
1949}
1950
1951static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1952			  u64 in_param, u64 *out_param, int in_port)
1953{
1954	struct mlx4_priv *priv = mlx4_priv(dev);
1955	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
1956	int err;
1957	u16 vlan;
1958	int vlan_index;
1959	int port;
1960
1961	port = !in_port ? get_param_l(out_param) : in_port;
1962
1963	if (!port || op != RES_OP_RESERVE_AND_MAP)
1964		return -EINVAL;
1965
1966	port = mlx4_slave_convert_port(
1967			dev, slave, port);
1968
1969	if (port < 0)
1970		return -EINVAL;
1971	/* upstream kernels had NOP for reg/unreg vlan. Continue this. */
1972	if (!in_port && port > 0 && port <= dev->caps.num_ports) {
1973		slave_state[slave].old_vlan_api = true;
1974		return 0;
1975	}
1976
1977	vlan = (u16) in_param;
1978
1979	err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
1980	if (!err) {
1981		set_param_l(out_param, (u32) vlan_index);
1982		err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
1983		if (err)
1984			__mlx4_unregister_vlan(dev, port, vlan);
1985	}
1986	return err;
1987}
1988
1989static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1990			     u64 in_param, u64 *out_param)
1991{
1992	u32 index;
1993	int err;
1994
1995	if (op != RES_OP_RESERVE)
1996		return -EINVAL;
1997
1998	err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
1999	if (err)
2000		return err;
2001
2002	err = __mlx4_counter_alloc(dev, &index);
2003	if (err) {
2004		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2005		return err;
2006	}
2007
2008	err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2009	if (err) {
2010		__mlx4_counter_free(dev, index);
2011		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2012	} else {
2013		set_param_l(out_param, index);
2014	}
2015
2016	return err;
2017}
2018
2019static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2020			   u64 in_param, u64 *out_param)
2021{
2022	u32 xrcdn;
2023	int err;
2024
2025	if (op != RES_OP_RESERVE)
2026		return -EINVAL;
2027
2028	err = __mlx4_xrcd_alloc(dev, &xrcdn);
2029	if (err)
2030		return err;
2031
2032	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2033	if (err)
2034		__mlx4_xrcd_free(dev, xrcdn);
2035	else
2036		set_param_l(out_param, xrcdn);
2037
2038	return err;
2039}
2040
2041int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2042			   struct mlx4_vhcr *vhcr,
2043			   struct mlx4_cmd_mailbox *inbox,
2044			   struct mlx4_cmd_mailbox *outbox,
2045			   struct mlx4_cmd_info *cmd)
2046{
2047	int err;
2048	int alop = vhcr->op_modifier;
2049
2050	switch (vhcr->in_modifier & 0xFF) {
2051	case RES_QP:
2052		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2053				   vhcr->in_param, &vhcr->out_param);
2054		break;
2055
2056	case RES_MTT:
2057		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2058				    vhcr->in_param, &vhcr->out_param);
2059		break;
2060
2061	case RES_MPT:
2062		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2063				    vhcr->in_param, &vhcr->out_param);
2064		break;
2065
2066	case RES_CQ:
2067		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2068				   vhcr->in_param, &vhcr->out_param);
2069		break;
2070
2071	case RES_SRQ:
2072		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2073				    vhcr->in_param, &vhcr->out_param);
2074		break;
2075
2076	case RES_MAC:
2077		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
2078				    vhcr->in_param, &vhcr->out_param,
2079				    (vhcr->in_modifier >> 8) & 0xFF);
2080		break;
2081
2082	case RES_VLAN:
2083		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
2084				     vhcr->in_param, &vhcr->out_param,
2085				     (vhcr->in_modifier >> 8) & 0xFF);
2086		break;
2087
2088	case RES_COUNTER:
2089		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2090					vhcr->in_param, &vhcr->out_param);
2091		break;
2092
2093	case RES_XRCD:
2094		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2095				      vhcr->in_param, &vhcr->out_param);
2096		break;
2097
2098	default:
2099		err = -EINVAL;
2100		break;
2101	}
2102
2103	return err;
2104}
2105
2106static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2107		       u64 in_param)
2108{
2109	int err;
2110	int count;
2111	int base;
2112	int qpn;
2113
2114	switch (op) {
2115	case RES_OP_RESERVE:
2116		base = get_param_l(&in_param) & 0x7fffff;
2117		count = get_param_h(&in_param);
2118		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2119		if (err)
2120			break;
2121		mlx4_release_resource(dev, slave, RES_QP, count, 0);
2122		__mlx4_qp_release_range(dev, base, count);
2123		break;
2124	case RES_OP_MAP_ICM:
2125		qpn = get_param_l(&in_param) & 0x7fffff;
2126		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2127					   NULL, 0);
2128		if (err)
2129			return err;
2130
2131		if (!fw_reserved(dev, qpn))
2132			__mlx4_qp_free_icm(dev, qpn);
2133
2134		res_end_move(dev, slave, RES_QP, qpn);
2135
2136		if (valid_reserved(dev, slave, qpn))
2137			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2138		break;
2139	default:
2140		err = -EINVAL;
2141		break;
2142	}
2143	return err;
2144}
2145
2146static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2147			u64 in_param, u64 *out_param)
2148{
2149	int err = -EINVAL;
2150	int base;
2151	int order;
2152
2153	if (op != RES_OP_RESERVE_AND_MAP)
2154		return err;
2155
2156	base = get_param_l(&in_param);
2157	order = get_param_h(&in_param);
2158	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2159	if (!err) {
2160		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2161		__mlx4_free_mtt_range(dev, base, order);
2162	}
2163	return err;
2164}
2165
2166static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2167			u64 in_param)
2168{
2169	int err = -EINVAL;
2170	int index;
2171	int id;
2172	struct res_mpt *mpt;
2173
2174	switch (op) {
2175	case RES_OP_RESERVE:
2176		index = get_param_l(&in_param);
2177		id = index & mpt_mask(dev);
2178		err = get_res(dev, slave, id, RES_MPT, &mpt);
2179		if (err)
2180			break;
2181		index = mpt->key;
2182		put_res(dev, slave, id, RES_MPT);
2183
2184		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2185		if (err)
2186			break;
2187		mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2188		__mlx4_mpt_release(dev, index);
2189		break;
2190	case RES_OP_MAP_ICM:
2191			index = get_param_l(&in_param);
2192			id = index & mpt_mask(dev);
2193			err = mr_res_start_move_to(dev, slave, id,
2194						   RES_MPT_RESERVED, &mpt);
2195			if (err)
2196				return err;
2197
2198			__mlx4_mpt_free_icm(dev, mpt->key);
2199			res_end_move(dev, slave, RES_MPT, id);
2200			return err;
2201		break;
2202	default:
2203		err = -EINVAL;
2204		break;
2205	}
2206	return err;
2207}
2208
2209static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2210		       u64 in_param, u64 *out_param)
2211{
2212	int cqn;
2213	int err;
2214
2215	switch (op) {
2216	case RES_OP_RESERVE_AND_MAP:
2217		cqn = get_param_l(&in_param);
2218		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2219		if (err)
2220			break;
2221
2222		mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2223		__mlx4_cq_free_icm(dev, cqn);
2224		break;
2225
2226	default:
2227		err = -EINVAL;
2228		break;
2229	}
2230
2231	return err;
2232}
2233
2234static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2235			u64 in_param, u64 *out_param)
2236{
2237	int srqn;
2238	int err;
2239
2240	switch (op) {
2241	case RES_OP_RESERVE_AND_MAP:
2242		srqn = get_param_l(&in_param);
2243		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2244		if (err)
2245			break;
2246
2247		mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2248		__mlx4_srq_free_icm(dev, srqn);
2249		break;
2250
2251	default:
2252		err = -EINVAL;
2253		break;
2254	}
2255
2256	return err;
2257}
2258
2259static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2260			    u64 in_param, u64 *out_param, int in_port)
2261{
2262	int port;
2263	int err = 0;
2264
2265	switch (op) {
2266	case RES_OP_RESERVE_AND_MAP:
2267		port = !in_port ? get_param_l(out_param) : in_port;
2268		port = mlx4_slave_convert_port(
2269				dev, slave, port);
2270
2271		if (port < 0)
2272			return -EINVAL;
2273		mac_del_from_slave(dev, slave, in_param, port);
2274		__mlx4_unregister_mac(dev, port, in_param);
2275		break;
2276	default:
2277		err = -EINVAL;
2278		break;
2279	}
2280
2281	return err;
2282
2283}
2284
2285static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2286			    u64 in_param, u64 *out_param, int port)
2287{
2288	struct mlx4_priv *priv = mlx4_priv(dev);
2289	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2290	int err = 0;
2291
2292	port = mlx4_slave_convert_port(
2293			dev, slave, port);
2294
2295	if (port < 0)
2296		return -EINVAL;
2297	switch (op) {
2298	case RES_OP_RESERVE_AND_MAP:
2299		if (slave_state[slave].old_vlan_api)
2300			return 0;
2301		if (!port)
2302			return -EINVAL;
2303		vlan_del_from_slave(dev, slave, in_param, port);
2304		__mlx4_unregister_vlan(dev, port, in_param);
2305		break;
2306	default:
2307		err = -EINVAL;
2308		break;
2309	}
2310
2311	return err;
2312}
2313
2314static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2315			    u64 in_param, u64 *out_param)
2316{
2317	int index;
2318	int err;
2319
2320	if (op != RES_OP_RESERVE)
2321		return -EINVAL;
2322
2323	index = get_param_l(&in_param);
2324	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2325	if (err)
2326		return err;
2327
2328	__mlx4_counter_free(dev, index);
2329	mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2330
2331	return err;
2332}
2333
2334static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2335			  u64 in_param, u64 *out_param)
2336{
2337	int xrcdn;
2338	int err;
2339
2340	if (op != RES_OP_RESERVE)
2341		return -EINVAL;
2342
2343	xrcdn = get_param_l(&in_param);
2344	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2345	if (err)
2346		return err;
2347
2348	__mlx4_xrcd_free(dev, xrcdn);
2349
2350	return err;
2351}
2352
2353int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2354			  struct mlx4_vhcr *vhcr,
2355			  struct mlx4_cmd_mailbox *inbox,
2356			  struct mlx4_cmd_mailbox *outbox,
2357			  struct mlx4_cmd_info *cmd)
2358{
2359	int err = -EINVAL;
2360	int alop = vhcr->op_modifier;
2361
2362	switch (vhcr->in_modifier & 0xFF) {
2363	case RES_QP:
2364		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2365				  vhcr->in_param);
2366		break;
2367
2368	case RES_MTT:
2369		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2370				   vhcr->in_param, &vhcr->out_param);
2371		break;
2372
2373	case RES_MPT:
2374		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2375				   vhcr->in_param);
2376		break;
2377
2378	case RES_CQ:
2379		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2380				  vhcr->in_param, &vhcr->out_param);
2381		break;
2382
2383	case RES_SRQ:
2384		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2385				   vhcr->in_param, &vhcr->out_param);
2386		break;
2387
2388	case RES_MAC:
2389		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2390				   vhcr->in_param, &vhcr->out_param,
2391				   (vhcr->in_modifier >> 8) & 0xFF);
2392		break;
2393
2394	case RES_VLAN:
2395		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2396				    vhcr->in_param, &vhcr->out_param,
2397				    (vhcr->in_modifier >> 8) & 0xFF);
2398		break;
2399
2400	case RES_COUNTER:
2401		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2402				       vhcr->in_param, &vhcr->out_param);
2403		break;
2404
2405	case RES_XRCD:
2406		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2407				     vhcr->in_param, &vhcr->out_param);
2408
2409	default:
2410		break;
2411	}
2412	return err;
2413}
2414
2415/* ugly but other choices are uglier */
2416static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2417{
2418	return (be32_to_cpu(mpt->flags) >> 9) & 1;
2419}
2420
2421static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2422{
2423	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2424}
2425
2426static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2427{
2428	return be32_to_cpu(mpt->mtt_sz);
2429}
2430
2431static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2432{
2433	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2434}
2435
2436static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2437{
2438	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2439}
2440
2441static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2442{
2443	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2444}
2445
2446static int mr_is_region(struct mlx4_mpt_entry *mpt)
2447{
2448	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2449}
2450
2451static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2452{
2453	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2454}
2455
2456static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2457{
2458	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2459}
2460
2461static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2462{
2463	int page_shift = (qpc->log_page_size & 0x3f) + 12;
2464	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2465	int log_sq_sride = qpc->sq_size_stride & 7;
2466	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2467	int log_rq_stride = qpc->rq_size_stride & 7;
2468	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2469	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2470	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2471	int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2472	int sq_size;
2473	int rq_size;
2474	int total_pages;
2475	int total_mem;
2476	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2477
2478	sq_size = 1 << (log_sq_size + log_sq_sride + 4);
2479	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2480	total_mem = sq_size + rq_size;
2481	total_pages =
2482		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2483				   page_shift);
2484
2485	return total_pages;
2486}
2487
2488static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2489			   int size, struct res_mtt *mtt)
2490{
2491	int res_start = mtt->com.res_id;
2492	int res_size = (1 << mtt->order);
2493
2494	if (start < res_start || start + size > res_start + res_size)
2495		return -EPERM;
2496	return 0;
2497}
2498
2499int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2500			   struct mlx4_vhcr *vhcr,
2501			   struct mlx4_cmd_mailbox *inbox,
2502			   struct mlx4_cmd_mailbox *outbox,
2503			   struct mlx4_cmd_info *cmd)
2504{
2505	int err;
2506	int index = vhcr->in_modifier;
2507	struct res_mtt *mtt;
2508	struct res_mpt *mpt;
2509	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2510	int phys;
2511	int id;
2512	u32 pd;
2513	int pd_slave;
2514
2515	id = index & mpt_mask(dev);
2516	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2517	if (err)
2518		return err;
2519
2520	/* Disable memory windows for VFs. */
2521	if (!mr_is_region(inbox->buf)) {
2522		err = -EPERM;
2523		goto ex_abort;
2524	}
2525
2526	/* Make sure that the PD bits related to the slave id are zeros. */
2527	pd = mr_get_pd(inbox->buf);
2528	pd_slave = (pd >> 17) & 0x7f;
2529	if (pd_slave != 0 && pd_slave != slave) {
2530		err = -EPERM;
2531		goto ex_abort;
2532	}
2533
2534	if (mr_is_fmr(inbox->buf)) {
2535		/* FMR and Bind Enable are forbidden in slave devices. */
2536		if (mr_is_bind_enabled(inbox->buf)) {
2537			err = -EPERM;
2538			goto ex_abort;
2539		}
2540		/* FMR and Memory Windows are also forbidden. */
2541		if (!mr_is_region(inbox->buf)) {
2542			err = -EPERM;
2543			goto ex_abort;
2544		}
2545	}
2546
2547	phys = mr_phys_mpt(inbox->buf);
2548	if (!phys) {
2549		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2550		if (err)
2551			goto ex_abort;
2552
2553		err = check_mtt_range(dev, slave, mtt_base,
2554				      mr_get_mtt_size(inbox->buf), mtt);
2555		if (err)
2556			goto ex_put;
2557
2558		mpt->mtt = mtt;
2559	}
2560
2561	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2562	if (err)
2563		goto ex_put;
2564
2565	if (!phys) {
2566		atomic_inc(&mtt->ref_count);
2567		put_res(dev, slave, mtt->com.res_id, RES_MTT);
2568	}
2569
2570	res_end_move(dev, slave, RES_MPT, id);
2571	return 0;
2572
2573ex_put:
2574	if (!phys)
2575		put_res(dev, slave, mtt->com.res_id, RES_MTT);
2576ex_abort:
2577	res_abort_move(dev, slave, RES_MPT, id);
2578
2579	return err;
2580}
2581
2582int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2583			   struct mlx4_vhcr *vhcr,
2584			   struct mlx4_cmd_mailbox *inbox,
2585			   struct mlx4_cmd_mailbox *outbox,
2586			   struct mlx4_cmd_info *cmd)
2587{
2588	int err;
2589	int index = vhcr->in_modifier;
2590	struct res_mpt *mpt;
2591	int id;
2592
2593	id = index & mpt_mask(dev);
2594	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2595	if (err)
2596		return err;
2597
2598	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2599	if (err)
2600		goto ex_abort;
2601
2602	if (mpt->mtt)
2603		atomic_dec(&mpt->mtt->ref_count);
2604
2605	res_end_move(dev, slave, RES_MPT, id);
2606	return 0;
2607
2608ex_abort:
2609	res_abort_move(dev, slave, RES_MPT, id);
2610
2611	return err;
2612}
2613
2614int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2615			   struct mlx4_vhcr *vhcr,
2616			   struct mlx4_cmd_mailbox *inbox,
2617			   struct mlx4_cmd_mailbox *outbox,
2618			   struct mlx4_cmd_info *cmd)
2619{
2620	int err;
2621	int index = vhcr->in_modifier;
2622	struct res_mpt *mpt;
2623	int id;
2624
2625	id = index & mpt_mask(dev);
2626	err = get_res(dev, slave, id, RES_MPT, &mpt);
2627	if (err)
2628		return err;
2629
2630	if (mpt->com.from_state == RES_MPT_MAPPED) {
2631		/* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2632		 * that, the VF must read the MPT. But since the MPT entry memory is not
2633		 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2634		 * entry contents. To guarantee that the MPT cannot be changed, the driver
2635		 * must perform HW2SW_MPT before this query and return the MPT entry to HW
2636		 * ownership fofollowing the change. The change here allows the VF to
2637		 * perform QUERY_MPT also when the entry is in SW ownership.
2638		 */
2639		struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2640					&mlx4_priv(dev)->mr_table.dmpt_table,
2641					mpt->key, NULL);
2642
2643		if (NULL == mpt_entry || NULL == outbox->buf) {
2644			err = -EINVAL;
2645			goto out;
2646		}
2647
2648		memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2649
2650		err = 0;
2651	} else if (mpt->com.from_state == RES_MPT_HW) {
2652		err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2653	} else {
2654		err = -EBUSY;
2655		goto out;
2656	}
2657
2658
2659out:
2660	put_res(dev, slave, id, RES_MPT);
2661	return err;
2662}
2663
2664static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2665{
2666	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2667}
2668
2669static int qp_get_scqn(struct mlx4_qp_context *qpc)
2670{
2671	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2672}
2673
2674static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2675{
2676	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2677}
2678
2679static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2680				  struct mlx4_qp_context *context)
2681{
2682	u32 qpn = vhcr->in_modifier & 0xffffff;
2683	u32 qkey = 0;
2684
2685	if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2686		return;
2687
2688	/* adjust qkey in qp context */
2689	context->qkey = cpu_to_be32(qkey);
2690}
2691
2692int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2693			     struct mlx4_vhcr *vhcr,
2694			     struct mlx4_cmd_mailbox *inbox,
2695			     struct mlx4_cmd_mailbox *outbox,
2696			     struct mlx4_cmd_info *cmd)
2697{
2698	int err;
2699	int qpn = vhcr->in_modifier & 0x7fffff;
2700	struct res_mtt *mtt;
2701	struct res_qp *qp;
2702	struct mlx4_qp_context *qpc = inbox->buf + 8;
2703	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2704	int mtt_size = qp_get_mtt_size(qpc);
2705	struct res_cq *rcq;
2706	struct res_cq *scq;
2707	int rcqn = qp_get_rcqn(qpc);
2708	int scqn = qp_get_scqn(qpc);
2709	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2710	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2711	struct res_srq *srq;
2712	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2713
2714	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2715	if (err)
2716		return err;
2717	qp->local_qpn = local_qpn;
2718	qp->sched_queue = 0;
2719	qp->param3 = 0;
2720	qp->vlan_control = 0;
2721	qp->fvl_rx = 0;
2722	qp->pri_path_fl = 0;
2723	qp->vlan_index = 0;
2724	qp->feup = 0;
2725	qp->qpc_flags = be32_to_cpu(qpc->flags);
2726
2727	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2728	if (err)
2729		goto ex_abort;
2730
2731	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2732	if (err)
2733		goto ex_put_mtt;
2734
2735	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2736	if (err)
2737		goto ex_put_mtt;
2738
2739	if (scqn != rcqn) {
2740		err = get_res(dev, slave, scqn, RES_CQ, &scq);
2741		if (err)
2742			goto ex_put_rcq;
2743	} else
2744		scq = rcq;
2745
2746	if (use_srq) {
2747		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2748		if (err)
2749			goto ex_put_scq;
2750	}
2751
2752	adjust_proxy_tun_qkey(dev, vhcr, qpc);
2753	update_pkey_index(dev, slave, inbox);
2754	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2755	if (err)
2756		goto ex_put_srq;
2757	atomic_inc(&mtt->ref_count);
2758	qp->mtt = mtt;
2759	atomic_inc(&rcq->ref_count);
2760	qp->rcq = rcq;
2761	atomic_inc(&scq->ref_count);
2762	qp->scq = scq;
2763
2764	if (scqn != rcqn)
2765		put_res(dev, slave, scqn, RES_CQ);
2766
2767	if (use_srq) {
2768		atomic_inc(&srq->ref_count);
2769		put_res(dev, slave, srqn, RES_SRQ);
2770		qp->srq = srq;
2771	}
2772	put_res(dev, slave, rcqn, RES_CQ);
2773	put_res(dev, slave, mtt_base, RES_MTT);
2774	res_end_move(dev, slave, RES_QP, qpn);
2775
2776	return 0;
2777
2778ex_put_srq:
2779	if (use_srq)
2780		put_res(dev, slave, srqn, RES_SRQ);
2781ex_put_scq:
2782	if (scqn != rcqn)
2783		put_res(dev, slave, scqn, RES_CQ);
2784ex_put_rcq:
2785	put_res(dev, slave, rcqn, RES_CQ);
2786ex_put_mtt:
2787	put_res(dev, slave, mtt_base, RES_MTT);
2788ex_abort:
2789	res_abort_move(dev, slave, RES_QP, qpn);
2790
2791	return err;
2792}
2793
2794static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2795{
2796	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2797}
2798
2799static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2800{
2801	int log_eq_size = eqc->log_eq_size & 0x1f;
2802	int page_shift = (eqc->log_page_size & 0x3f) + 12;
2803
2804	if (log_eq_size + 5 < page_shift)
2805		return 1;
2806
2807	return 1 << (log_eq_size + 5 - page_shift);
2808}
2809
2810static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2811{
2812	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2813}
2814
2815static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2816{
2817	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2818	int page_shift = (cqc->log_page_size & 0x3f) + 12;
2819
2820	if (log_cq_size + 5 < page_shift)
2821		return 1;
2822
2823	return 1 << (log_cq_size + 5 - page_shift);
2824}
2825
2826int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2827			  struct mlx4_vhcr *vhcr,
2828			  struct mlx4_cmd_mailbox *inbox,
2829			  struct mlx4_cmd_mailbox *outbox,
2830			  struct mlx4_cmd_info *cmd)
2831{
2832	int err;
2833	int eqn = vhcr->in_modifier;
2834	int res_id = (slave << 8) | eqn;
2835	struct mlx4_eq_context *eqc = inbox->buf;
2836	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2837	int mtt_size = eq_get_mtt_size(eqc);
2838	struct res_eq *eq;
2839	struct res_mtt *mtt;
2840
2841	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2842	if (err)
2843		return err;
2844	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2845	if (err)
2846		goto out_add;
2847
2848	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2849	if (err)
2850		goto out_move;
2851
2852	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2853	if (err)
2854		goto out_put;
2855
2856	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2857	if (err)
2858		goto out_put;
2859
2860	atomic_inc(&mtt->ref_count);
2861	eq->mtt = mtt;
2862	put_res(dev, slave, mtt->com.res_id, RES_MTT);
2863	res_end_move(dev, slave, RES_EQ, res_id);
2864	return 0;
2865
2866out_put:
2867	put_res(dev, slave, mtt->com.res_id, RES_MTT);
2868out_move:
2869	res_abort_move(dev, slave, RES_EQ, res_id);
2870out_add:
2871	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2872	return err;
2873}
2874
2875static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2876			      int len, struct res_mtt **res)
2877{
2878	struct mlx4_priv *priv = mlx4_priv(dev);
2879	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2880	struct res_mtt *mtt;
2881	int err = -EINVAL;
2882
2883	spin_lock_irq(mlx4_tlock(dev));
2884	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2885			    com.list) {
2886		if (!check_mtt_range(dev, slave, start, len, mtt)) {
2887			*res = mtt;
2888			mtt->com.from_state = mtt->com.state;
2889			mtt->com.state = RES_MTT_BUSY;
2890			err = 0;
2891			break;
2892		}
2893	}
2894	spin_unlock_irq(mlx4_tlock(dev));
2895
2896	return err;
2897}
2898
2899static int verify_qp_parameters(struct mlx4_dev *dev,
2900				struct mlx4_vhcr *vhcr,
2901				struct mlx4_cmd_mailbox *inbox,
2902				enum qp_transition transition, u8 slave)
2903{
2904	u32			qp_type;
2905	u32			qpn;
2906	struct mlx4_qp_context	*qp_ctx;
2907	enum mlx4_qp_optpar	optpar;
2908	int port;
2909	int num_gids;
2910
2911	qp_ctx  = inbox->buf + 8;
2912	qp_type	= (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2913	optpar	= be32_to_cpu(*(__be32 *) inbox->buf);
2914
2915	switch (qp_type) {
2916	case MLX4_QP_ST_RC:
2917	case MLX4_QP_ST_XRC:
2918	case MLX4_QP_ST_UC:
2919		switch (transition) {
2920		case QP_TRANS_INIT2RTR:
2921		case QP_TRANS_RTR2RTS:
2922		case QP_TRANS_RTS2RTS:
2923		case QP_TRANS_SQD2SQD:
2924		case QP_TRANS_SQD2RTS:
2925			if (slave != mlx4_master_func_num(dev))
2926				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
2927					port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2928					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2929						num_gids = mlx4_get_slave_num_gids(dev, slave, port);
2930					else
2931						num_gids = 1;
2932					if (qp_ctx->pri_path.mgid_index >= num_gids)
2933						return -EINVAL;
2934				}
2935				if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
2936					port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
2937					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2938						num_gids = mlx4_get_slave_num_gids(dev, slave, port);
2939					else
2940						num_gids = 1;
2941					if (qp_ctx->alt_path.mgid_index >= num_gids)
2942						return -EINVAL;
2943				}
2944			break;
2945		default:
2946			break;
2947		}
2948		break;
2949
2950	case MLX4_QP_ST_MLX:
2951		qpn = vhcr->in_modifier & 0x7fffff;
2952		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2953		if (transition == QP_TRANS_INIT2RTR &&
2954		    slave != mlx4_master_func_num(dev) &&
2955		    mlx4_is_qp_reserved(dev, qpn) &&
2956		    !mlx4_vf_smi_enabled(dev, slave, port)) {
2957			/* only enabled VFs may create MLX proxy QPs */
2958			mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
2959				 __func__, slave, port);
2960			return -EPERM;
2961		}
2962		break;
2963
2964	default:
2965		break;
2966	}
2967
2968	return 0;
2969}
2970
2971int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2972			   struct mlx4_vhcr *vhcr,
2973			   struct mlx4_cmd_mailbox *inbox,
2974			   struct mlx4_cmd_mailbox *outbox,
2975			   struct mlx4_cmd_info *cmd)
2976{
2977	struct mlx4_mtt mtt;
2978	__be64 *page_list = inbox->buf;
2979	u64 *pg_list = (u64 *)page_list;
2980	int i;
2981	struct res_mtt *rmtt = NULL;
2982	int start = be64_to_cpu(page_list[0]);
2983	int npages = vhcr->in_modifier;
2984	int err;
2985
2986	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
2987	if (err)
2988		return err;
2989
2990	/* Call the SW implementation of write_mtt:
2991	 * - Prepare a dummy mtt struct
2992	 * - Translate inbox contents to simple addresses in host endianess */
2993	mtt.offset = 0;  /* TBD this is broken but I don't handle it since
2994			    we don't really use it */
2995	mtt.order = 0;
2996	mtt.page_shift = 0;
2997	for (i = 0; i < npages; ++i)
2998		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
2999
3000	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3001			       ((u64 *)page_list + 2));
3002
3003	if (rmtt)
3004		put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3005
3006	return err;
3007}
3008
3009int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3010			  struct mlx4_vhcr *vhcr,
3011			  struct mlx4_cmd_mailbox *inbox,
3012			  struct mlx4_cmd_mailbox *outbox,
3013			  struct mlx4_cmd_info *cmd)
3014{
3015	int eqn = vhcr->in_modifier;
3016	int res_id = eqn | (slave << 8);
3017	struct res_eq *eq;
3018	int err;
3019
3020	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3021	if (err)
3022		return err;
3023
3024	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3025	if (err)
3026		goto ex_abort;
3027
3028	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3029	if (err)
3030		goto ex_put;
3031
3032	atomic_dec(&eq->mtt->ref_count);
3033	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3034	res_end_move(dev, slave, RES_EQ, res_id);
3035	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3036
3037	return 0;
3038
3039ex_put:
3040	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3041ex_abort:
3042	res_abort_move(dev, slave, RES_EQ, res_id);
3043
3044	return err;
3045}
3046
3047int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3048{
3049	struct mlx4_priv *priv = mlx4_priv(dev);
3050	struct mlx4_slave_event_eq_info *event_eq;
3051	struct mlx4_cmd_mailbox *mailbox;
3052	u32 in_modifier = 0;
3053	int err;
3054	int res_id;
3055	struct res_eq *req;
3056
3057	if (!priv->mfunc.master.slave_state)
3058		return -EINVAL;
3059
3060	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3061
3062	/* Create the event only if the slave is registered */
3063	if (event_eq->eqn < 0)
3064		return 0;
3065
3066	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3067	res_id = (slave << 8) | event_eq->eqn;
3068	err = get_res(dev, slave, res_id, RES_EQ, &req);
3069	if (err)
3070		goto unlock;
3071
3072	if (req->com.from_state != RES_EQ_HW) {
3073		err = -EINVAL;
3074		goto put;
3075	}
3076
3077	mailbox = mlx4_alloc_cmd_mailbox(dev);
3078	if (IS_ERR(mailbox)) {
3079		err = PTR_ERR(mailbox);
3080		goto put;
3081	}
3082
3083	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3084		++event_eq->token;
3085		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3086	}
3087
3088	memcpy(mailbox->buf, (u8 *) eqe, 28);
3089
3090	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
3091
3092	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3093		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3094		       MLX4_CMD_NATIVE);
3095
3096	put_res(dev, slave, res_id, RES_EQ);
3097	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3098	mlx4_free_cmd_mailbox(dev, mailbox);
3099	return err;
3100
3101put:
3102	put_res(dev, slave, res_id, RES_EQ);
3103
3104unlock:
3105	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3106	return err;
3107}
3108
3109int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3110			  struct mlx4_vhcr *vhcr,
3111			  struct mlx4_cmd_mailbox *inbox,
3112			  struct mlx4_cmd_mailbox *outbox,
3113			  struct mlx4_cmd_info *cmd)
3114{
3115	int eqn = vhcr->in_modifier;
3116	int res_id = eqn | (slave << 8);
3117	struct res_eq *eq;
3118	int err;
3119
3120	err = get_res(dev, slave, res_id, RES_EQ, &eq);
3121	if (err)
3122		return err;
3123
3124	if (eq->com.from_state != RES_EQ_HW) {
3125		err = -EINVAL;
3126		goto ex_put;
3127	}
3128
3129	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3130
3131ex_put:
3132	put_res(dev, slave, res_id, RES_EQ);
3133	return err;
3134}
3135
3136int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3137			  struct mlx4_vhcr *vhcr,
3138			  struct mlx4_cmd_mailbox *inbox,
3139			  struct mlx4_cmd_mailbox *outbox,
3140			  struct mlx4_cmd_info *cmd)
3141{
3142	int err;
3143	int cqn = vhcr->in_modifier;
3144	struct mlx4_cq_context *cqc = inbox->buf;
3145	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3146	struct res_cq *cq;
3147	struct res_mtt *mtt;
3148
3149	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3150	if (err)
3151		return err;
3152	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3153	if (err)
3154		goto out_move;
3155	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3156	if (err)
3157		goto out_put;
3158	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3159	if (err)
3160		goto out_put;
3161	atomic_inc(&mtt->ref_count);
3162	cq->mtt = mtt;
3163	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3164	res_end_move(dev, slave, RES_CQ, cqn);
3165	return 0;
3166
3167out_put:
3168	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3169out_move:
3170	res_abort_move(dev, slave, RES_CQ, cqn);
3171	return err;
3172}
3173
3174int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3175			  struct mlx4_vhcr *vhcr,
3176			  struct mlx4_cmd_mailbox *inbox,
3177			  struct mlx4_cmd_mailbox *outbox,
3178			  struct mlx4_cmd_info *cmd)
3179{
3180	int err;
3181	int cqn = vhcr->in_modifier;
3182	struct res_cq *cq;
3183
3184	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3185	if (err)
3186		return err;
3187	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3188	if (err)
3189		goto out_move;
3190	atomic_dec(&cq->mtt->ref_count);
3191	res_end_move(dev, slave, RES_CQ, cqn);
3192	return 0;
3193
3194out_move:
3195	res_abort_move(dev, slave, RES_CQ, cqn);
3196	return err;
3197}
3198
3199int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3200			  struct mlx4_vhcr *vhcr,
3201			  struct mlx4_cmd_mailbox *inbox,
3202			  struct mlx4_cmd_mailbox *outbox,
3203			  struct mlx4_cmd_info *cmd)
3204{
3205	int cqn = vhcr->in_modifier;
3206	struct res_cq *cq;
3207	int err;
3208
3209	err = get_res(dev, slave, cqn, RES_CQ, &cq);
3210	if (err)
3211		return err;
3212
3213	if (cq->com.from_state != RES_CQ_HW)
3214		goto ex_put;
3215
3216	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3217ex_put:
3218	put_res(dev, slave, cqn, RES_CQ);
3219
3220	return err;
3221}
3222
3223static int handle_resize(struct mlx4_dev *dev, int slave,
3224			 struct mlx4_vhcr *vhcr,
3225			 struct mlx4_cmd_mailbox *inbox,
3226			 struct mlx4_cmd_mailbox *outbox,
3227			 struct mlx4_cmd_info *cmd,
3228			 struct res_cq *cq)
3229{
3230	int err;
3231	struct res_mtt *orig_mtt;
3232	struct res_mtt *mtt;
3233	struct mlx4_cq_context *cqc = inbox->buf;
3234	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3235
3236	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3237	if (err)
3238		return err;
3239
3240	if (orig_mtt != cq->mtt) {
3241		err = -EINVAL;
3242		goto ex_put;
3243	}
3244
3245	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3246	if (err)
3247		goto ex_put;
3248
3249	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3250	if (err)
3251		goto ex_put1;
3252	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3253	if (err)
3254		goto ex_put1;
3255	atomic_dec(&orig_mtt->ref_count);
3256	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3257	atomic_inc(&mtt->ref_count);
3258	cq->mtt = mtt;
3259	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3260	return 0;
3261
3262ex_put1:
3263	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3264ex_put:
3265	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3266
3267	return err;
3268
3269}
3270
3271int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3272			   struct mlx4_vhcr *vhcr,
3273			   struct mlx4_cmd_mailbox *inbox,
3274			   struct mlx4_cmd_mailbox *outbox,
3275			   struct mlx4_cmd_info *cmd)
3276{
3277	int cqn = vhcr->in_modifier;
3278	struct res_cq *cq;
3279	int err;
3280
3281	err = get_res(dev, slave, cqn, RES_CQ, &cq);
3282	if (err)
3283		return err;
3284
3285	if (cq->com.from_state != RES_CQ_HW)
3286		goto ex_put;
3287
3288	if (vhcr->op_modifier == 0) {
3289		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3290		goto ex_put;
3291	}
3292
3293	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3294ex_put:
3295	put_res(dev, slave, cqn, RES_CQ);
3296
3297	return err;
3298}
3299
3300static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3301{
3302	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3303	int log_rq_stride = srqc->logstride & 7;
3304	int page_shift = (srqc->log_page_size & 0x3f) + 12;
3305
3306	if (log_srq_size + log_rq_stride + 4 < page_shift)
3307		return 1;
3308
3309	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3310}
3311
3312int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3313			   struct mlx4_vhcr *vhcr,
3314			   struct mlx4_cmd_mailbox *inbox,
3315			   struct mlx4_cmd_mailbox *outbox,
3316			   struct mlx4_cmd_info *cmd)
3317{
3318	int err;
3319	int srqn = vhcr->in_modifier;
3320	struct res_mtt *mtt;
3321	struct res_srq *srq;
3322	struct mlx4_srq_context *srqc = inbox->buf;
3323	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3324
3325	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3326		return -EINVAL;
3327
3328	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3329	if (err)
3330		return err;
3331	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3332	if (err)
3333		goto ex_abort;
3334	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3335			      mtt);
3336	if (err)
3337		goto ex_put_mtt;
3338
3339	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3340	if (err)
3341		goto ex_put_mtt;
3342
3343	atomic_inc(&mtt->ref_count);
3344	srq->mtt = mtt;
3345	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3346	res_end_move(dev, slave, RES_SRQ, srqn);
3347	return 0;
3348
3349ex_put_mtt:
3350	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3351ex_abort:
3352	res_abort_move(dev, slave, RES_SRQ, srqn);
3353
3354	return err;
3355}
3356
3357int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3358			   struct mlx4_vhcr *vhcr,
3359			   struct mlx4_cmd_mailbox *inbox,
3360			   struct mlx4_cmd_mailbox *outbox,
3361			   struct mlx4_cmd_info *cmd)
3362{
3363	int err;
3364	int srqn = vhcr->in_modifier;
3365	struct res_srq *srq;
3366
3367	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3368	if (err)
3369		return err;
3370	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3371	if (err)
3372		goto ex_abort;
3373	atomic_dec(&srq->mtt->ref_count);
3374	if (srq->cq)
3375		atomic_dec(&srq->cq->ref_count);
3376	res_end_move(dev, slave, RES_SRQ, srqn);
3377
3378	return 0;
3379
3380ex_abort:
3381	res_abort_move(dev, slave, RES_SRQ, srqn);
3382
3383	return err;
3384}
3385
3386int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3387			   struct mlx4_vhcr *vhcr,
3388			   struct mlx4_cmd_mailbox *inbox,
3389			   struct mlx4_cmd_mailbox *outbox,
3390			   struct mlx4_cmd_info *cmd)
3391{
3392	int err;
3393	int srqn = vhcr->in_modifier;
3394	struct res_srq *srq;
3395
3396	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3397	if (err)
3398		return err;
3399	if (srq->com.from_state != RES_SRQ_HW) {
3400		err = -EBUSY;
3401		goto out;
3402	}
3403	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3404out:
3405	put_res(dev, slave, srqn, RES_SRQ);
3406	return err;
3407}
3408
3409int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3410			 struct mlx4_vhcr *vhcr,
3411			 struct mlx4_cmd_mailbox *inbox,
3412			 struct mlx4_cmd_mailbox *outbox,
3413			 struct mlx4_cmd_info *cmd)
3414{
3415	int err;
3416	int srqn = vhcr->in_modifier;
3417	struct res_srq *srq;
3418
3419	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3420	if (err)
3421		return err;
3422
3423	if (srq->com.from_state != RES_SRQ_HW) {
3424		err = -EBUSY;
3425		goto out;
3426	}
3427
3428	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3429out:
3430	put_res(dev, slave, srqn, RES_SRQ);
3431	return err;
3432}
3433
3434int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3435			struct mlx4_vhcr *vhcr,
3436			struct mlx4_cmd_mailbox *inbox,
3437			struct mlx4_cmd_mailbox *outbox,
3438			struct mlx4_cmd_info *cmd)
3439{
3440	int err;
3441	int qpn = vhcr->in_modifier & 0x7fffff;
3442	struct res_qp *qp;
3443
3444	err = get_res(dev, slave, qpn, RES_QP, &qp);
3445	if (err)
3446		return err;
3447	if (qp->com.from_state != RES_QP_HW) {
3448		err = -EBUSY;
3449		goto out;
3450	}
3451
3452	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3453out:
3454	put_res(dev, slave, qpn, RES_QP);
3455	return err;
3456}
3457
3458int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3459			      struct mlx4_vhcr *vhcr,
3460			      struct mlx4_cmd_mailbox *inbox,
3461			      struct mlx4_cmd_mailbox *outbox,
3462			      struct mlx4_cmd_info *cmd)
3463{
3464	struct mlx4_qp_context *context = inbox->buf + 8;
3465	adjust_proxy_tun_qkey(dev, vhcr, context);
3466	update_pkey_index(dev, slave, inbox);
3467	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3468}
3469
3470static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3471				  struct mlx4_qp_context *qpc,
3472				  struct mlx4_cmd_mailbox *inbox)
3473{
3474	enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3475	u8 pri_sched_queue;
3476	int port = mlx4_slave_convert_port(
3477		   dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3478
3479	if (port < 0)
3480		return -EINVAL;
3481
3482	pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3483			  ((port & 1) << 6);
3484
3485	if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH ||
3486	    mlx4_is_eth(dev, port + 1)) {
3487		qpc->pri_path.sched_queue = pri_sched_queue;
3488	}
3489
3490	if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3491		port = mlx4_slave_convert_port(
3492				dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3493				+ 1) - 1;
3494		if (port < 0)
3495			return -EINVAL;
3496		qpc->alt_path.sched_queue =
3497			(qpc->alt_path.sched_queue & ~(1 << 6)) |
3498			(port & 1) << 6;
3499	}
3500	return 0;
3501}
3502
3503static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3504				struct mlx4_qp_context *qpc,
3505				struct mlx4_cmd_mailbox *inbox)
3506{
3507	u64 mac;
3508	int port;
3509	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3510	u8 sched = *(u8 *)(inbox->buf + 64);
3511	u8 smac_ix;
3512
3513	port = (sched >> 6 & 1) + 1;
3514	if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3515		smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3516		if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3517			return -ENOENT;
3518	}
3519	return 0;
3520}
3521
3522int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3523			     struct mlx4_vhcr *vhcr,
3524			     struct mlx4_cmd_mailbox *inbox,
3525			     struct mlx4_cmd_mailbox *outbox,
3526			     struct mlx4_cmd_info *cmd)
3527{
3528	int err;
3529	struct mlx4_qp_context *qpc = inbox->buf + 8;
3530	int qpn = vhcr->in_modifier & 0x7fffff;
3531	struct res_qp *qp;
3532	u8 orig_sched_queue;
3533	__be32	orig_param3 = qpc->param3;
3534	u8 orig_vlan_control = qpc->pri_path.vlan_control;
3535	u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3536	u8 orig_pri_path_fl = qpc->pri_path.fl;
3537	u8 orig_vlan_index = qpc->pri_path.vlan_index;
3538	u8 orig_feup = qpc->pri_path.feup;
3539
3540	err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3541	if (err)
3542		return err;
3543	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3544	if (err)
3545		return err;
3546
3547	if (roce_verify_mac(dev, slave, qpc, inbox))
3548		return -EINVAL;
3549
3550	update_pkey_index(dev, slave, inbox);
3551	update_gid(dev, inbox, (u8)slave);
3552	adjust_proxy_tun_qkey(dev, vhcr, qpc);
3553	orig_sched_queue = qpc->pri_path.sched_queue;
3554	err = update_vport_qp_param(dev, inbox, slave, qpn);
3555	if (err)
3556		return err;
3557
3558	err = get_res(dev, slave, qpn, RES_QP, &qp);
3559	if (err)
3560		return err;
3561	if (qp->com.from_state != RES_QP_HW) {
3562		err = -EBUSY;
3563		goto out;
3564	}
3565
3566	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3567out:
3568	/* if no error, save sched queue value passed in by VF. This is
3569	 * essentially the QOS value provided by the VF. This will be useful
3570	 * if we allow dynamic changes from VST back to VGT
3571	 */
3572	if (!err) {
3573		qp->sched_queue = orig_sched_queue;
3574		qp->param3	= orig_param3;
3575		qp->vlan_control = orig_vlan_control;
3576		qp->fvl_rx	=  orig_fvl_rx;
3577		qp->pri_path_fl = orig_pri_path_fl;
3578		qp->vlan_index  = orig_vlan_index;
3579		qp->feup	= orig_feup;
3580	}
3581	put_res(dev, slave, qpn, RES_QP);
3582	return err;
3583}
3584
3585int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3586			    struct mlx4_vhcr *vhcr,
3587			    struct mlx4_cmd_mailbox *inbox,
3588			    struct mlx4_cmd_mailbox *outbox,
3589			    struct mlx4_cmd_info *cmd)
3590{
3591	int err;
3592	struct mlx4_qp_context *context = inbox->buf + 8;
3593
3594	err = adjust_qp_sched_queue(dev, slave, context, inbox);
3595	if (err)
3596		return err;
3597	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3598	if (err)
3599		return err;
3600
3601	update_pkey_index(dev, slave, inbox);
3602	update_gid(dev, inbox, (u8)slave);
3603	adjust_proxy_tun_qkey(dev, vhcr, context);
3604	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3605}
3606
3607int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3608			    struct mlx4_vhcr *vhcr,
3609			    struct mlx4_cmd_mailbox *inbox,
3610			    struct mlx4_cmd_mailbox *outbox,
3611			    struct mlx4_cmd_info *cmd)
3612{
3613	int err;
3614	struct mlx4_qp_context *context = inbox->buf + 8;
3615
3616	err = adjust_qp_sched_queue(dev, slave, context, inbox);
3617	if (err)
3618		return err;
3619	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3620	if (err)
3621		return err;
3622
3623	update_pkey_index(dev, slave, inbox);
3624	update_gid(dev, inbox, (u8)slave);
3625	adjust_proxy_tun_qkey(dev, vhcr, context);
3626	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3627}
3628
3629
3630int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3631			      struct mlx4_vhcr *vhcr,
3632			      struct mlx4_cmd_mailbox *inbox,
3633			      struct mlx4_cmd_mailbox *outbox,
3634			      struct mlx4_cmd_info *cmd)
3635{
3636	struct mlx4_qp_context *context = inbox->buf + 8;
3637	int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3638	if (err)
3639		return err;
3640	adjust_proxy_tun_qkey(dev, vhcr, context);
3641	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3642}
3643
3644int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3645			    struct mlx4_vhcr *vhcr,
3646			    struct mlx4_cmd_mailbox *inbox,
3647			    struct mlx4_cmd_mailbox *outbox,
3648			    struct mlx4_cmd_info *cmd)
3649{
3650	int err;
3651	struct mlx4_qp_context *context = inbox->buf + 8;
3652
3653	err = adjust_qp_sched_queue(dev, slave, context, inbox);
3654	if (err)
3655		return err;
3656	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3657	if (err)
3658		return err;
3659
3660	adjust_proxy_tun_qkey(dev, vhcr, context);
3661	update_gid(dev, inbox, (u8)slave);
3662	update_pkey_index(dev, slave, inbox);
3663	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3664}
3665
3666int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3667			    struct mlx4_vhcr *vhcr,
3668			    struct mlx4_cmd_mailbox *inbox,
3669			    struct mlx4_cmd_mailbox *outbox,
3670			    struct mlx4_cmd_info *cmd)
3671{
3672	int err;
3673	struct mlx4_qp_context *context = inbox->buf + 8;
3674
3675	err = adjust_qp_sched_queue(dev, slave, context, inbox);
3676	if (err)
3677		return err;
3678	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3679	if (err)
3680		return err;
3681
3682	adjust_proxy_tun_qkey(dev, vhcr, context);
3683	update_gid(dev, inbox, (u8)slave);
3684	update_pkey_index(dev, slave, inbox);
3685	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3686}
3687
3688int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3689			 struct mlx4_vhcr *vhcr,
3690			 struct mlx4_cmd_mailbox *inbox,
3691			 struct mlx4_cmd_mailbox *outbox,
3692			 struct mlx4_cmd_info *cmd)
3693{
3694	int err;
3695	int qpn = vhcr->in_modifier & 0x7fffff;
3696	struct res_qp *qp;
3697
3698	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3699	if (err)
3700		return err;
3701	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3702	if (err)
3703		goto ex_abort;
3704
3705	atomic_dec(&qp->mtt->ref_count);
3706	atomic_dec(&qp->rcq->ref_count);
3707	atomic_dec(&qp->scq->ref_count);
3708	if (qp->srq)
3709		atomic_dec(&qp->srq->ref_count);
3710	res_end_move(dev, slave, RES_QP, qpn);
3711	return 0;
3712
3713ex_abort:
3714	res_abort_move(dev, slave, RES_QP, qpn);
3715
3716	return err;
3717}
3718
3719static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3720				struct res_qp *rqp, u8 *gid)
3721{
3722	struct res_gid *res;
3723
3724	list_for_each_entry(res, &rqp->mcg_list, list) {
3725		if (!memcmp(res->gid, gid, 16))
3726			return res;
3727	}
3728	return NULL;
3729}
3730
3731static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3732		       u8 *gid, enum mlx4_protocol prot,
3733		       enum mlx4_steer_type steer, u64 reg_id)
3734{
3735	struct res_gid *res;
3736	int err;
3737
3738	res = kzalloc(sizeof *res, GFP_KERNEL);
3739	if (!res)
3740		return -ENOMEM;
3741
3742	spin_lock_irq(&rqp->mcg_spl);
3743	if (find_gid(dev, slave, rqp, gid)) {
3744		kfree(res);
3745		err = -EEXIST;
3746	} else {
3747		memcpy(res->gid, gid, 16);
3748		res->prot = prot;
3749		res->steer = steer;
3750		res->reg_id = reg_id;
3751		list_add_tail(&res->list, &rqp->mcg_list);
3752		err = 0;
3753	}
3754	spin_unlock_irq(&rqp->mcg_spl);
3755
3756	return err;
3757}
3758
3759static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3760		       u8 *gid, enum mlx4_protocol prot,
3761		       enum mlx4_steer_type steer, u64 *reg_id)
3762{
3763	struct res_gid *res;
3764	int err;
3765
3766	spin_lock_irq(&rqp->mcg_spl);
3767	res = find_gid(dev, slave, rqp, gid);
3768	if (!res || res->prot != prot || res->steer != steer)
3769		err = -EINVAL;
3770	else {
3771		*reg_id = res->reg_id;
3772		list_del(&res->list);
3773		kfree(res);
3774		err = 0;
3775	}
3776	spin_unlock_irq(&rqp->mcg_spl);
3777
3778	return err;
3779}
3780
3781static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
3782		     u8 gid[16], int block_loopback, enum mlx4_protocol prot,
3783		     enum mlx4_steer_type type, u64 *reg_id)
3784{
3785	switch (dev->caps.steering_mode) {
3786	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
3787		int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3788		if (port < 0)
3789			return port;
3790		return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
3791						block_loopback, prot,
3792						reg_id);
3793	}
3794	case MLX4_STEERING_MODE_B0:
3795		if (prot == MLX4_PROT_ETH) {
3796			int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3797			if (port < 0)
3798				return port;
3799			gid[5] = port;
3800		}
3801		return mlx4_qp_attach_common(dev, qp, gid,
3802					    block_loopback, prot, type);
3803	default:
3804		return -EINVAL;
3805	}
3806}
3807
3808static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
3809		     u8 gid[16], enum mlx4_protocol prot,
3810		     enum mlx4_steer_type type, u64 reg_id)
3811{
3812	switch (dev->caps.steering_mode) {
3813	case MLX4_STEERING_MODE_DEVICE_MANAGED:
3814		return mlx4_flow_detach(dev, reg_id);
3815	case MLX4_STEERING_MODE_B0:
3816		return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3817	default:
3818		return -EINVAL;
3819	}
3820}
3821
3822static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
3823			    u8 *gid, enum mlx4_protocol prot)
3824{
3825	int real_port;
3826
3827	if (prot != MLX4_PROT_ETH)
3828		return 0;
3829
3830	if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
3831	    dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
3832		real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
3833		if (real_port < 0)
3834			return -EINVAL;
3835		gid[5] = real_port;
3836	}
3837
3838	return 0;
3839}
3840
3841int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3842			       struct mlx4_vhcr *vhcr,
3843			       struct mlx4_cmd_mailbox *inbox,
3844			       struct mlx4_cmd_mailbox *outbox,
3845			       struct mlx4_cmd_info *cmd)
3846{
3847	struct mlx4_qp qp; /* dummy for calling attach/detach */
3848	u8 *gid = inbox->buf;
3849	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3850	int err;
3851	int qpn;
3852	struct res_qp *rqp;
3853	u64 reg_id = 0;
3854	int attach = vhcr->op_modifier;
3855	int block_loopback = vhcr->in_modifier >> 31;
3856	u8 steer_type_mask = 2;
3857	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3858
3859	qpn = vhcr->in_modifier & 0xffffff;
3860	err = get_res(dev, slave, qpn, RES_QP, &rqp);
3861	if (err)
3862		return err;
3863
3864	qp.qpn = qpn;
3865	if (attach) {
3866		err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
3867				type, &reg_id);
3868		if (err) {
3869			pr_err("Fail to attach rule to qp 0x%x\n", qpn);
3870			goto ex_put;
3871		}
3872		err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
3873		if (err)
3874			goto ex_detach;
3875	} else {
3876		err = mlx4_adjust_port(dev, slave, gid, prot);
3877		if (err)
3878			goto ex_put;
3879
3880		err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3881		if (err)
3882			goto ex_put;
3883
3884		err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3885		if (err)
3886			pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
3887			       qpn, reg_id);
3888	}
3889	put_res(dev, slave, qpn, RES_QP);
3890	return err;
3891
3892ex_detach:
3893	qp_detach(dev, &qp, gid, prot, type, reg_id);
3894ex_put:
3895	put_res(dev, slave, qpn, RES_QP);
3896	return err;
3897}
3898
3899/*
3900 * MAC validation for Flow Steering rules.
3901 * VF can attach rules only with a mac address which is assigned to it.
3902 */
3903static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3904				   struct list_head *rlist)
3905{
3906	struct mac_res *res, *tmp;
3907	__be64 be_mac;
3908
3909	/* make sure it isn't multicast or broadcast mac*/
3910	if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3911	    !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3912		list_for_each_entry_safe(res, tmp, rlist, list) {
3913			be_mac = cpu_to_be64(res->mac << 16);
3914			if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
3915				return 0;
3916		}
3917		pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
3918		       eth_header->eth.dst_mac, slave);
3919		return -EINVAL;
3920	}
3921	return 0;
3922}
3923
3924/*
3925 * In case of missing eth header, append eth header with a MAC address
3926 * assigned to the VF.
3927 */
3928static int add_eth_header(struct mlx4_dev *dev, int slave,
3929			  struct mlx4_cmd_mailbox *inbox,
3930			  struct list_head *rlist, int header_id)
3931{
3932	struct mac_res *res, *tmp;
3933	u8 port;
3934	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3935	struct mlx4_net_trans_rule_hw_eth *eth_header;
3936	struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3937	struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3938	__be64 be_mac = 0;
3939	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3940
3941	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3942	port = ctrl->port;
3943	eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3944
3945	/* Clear a space in the inbox for eth header */
3946	switch (header_id) {
3947	case MLX4_NET_TRANS_RULE_ID_IPV4:
3948		ip_header =
3949			(struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3950		memmove(ip_header, eth_header,
3951			sizeof(*ip_header) + sizeof(*l4_header));
3952		break;
3953	case MLX4_NET_TRANS_RULE_ID_TCP:
3954	case MLX4_NET_TRANS_RULE_ID_UDP:
3955		l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3956			    (eth_header + 1);
3957		memmove(l4_header, eth_header, sizeof(*l4_header));
3958		break;
3959	default:
3960		return -EINVAL;
3961	}
3962	list_for_each_entry_safe(res, tmp, rlist, list) {
3963		if (port == res->port) {
3964			be_mac = cpu_to_be64(res->mac << 16);
3965			break;
3966		}
3967	}
3968	if (!be_mac) {
3969		pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
3970		       port);
3971		return -EINVAL;
3972	}
3973
3974	memset(eth_header, 0, sizeof(*eth_header));
3975	eth_header->size = sizeof(*eth_header) >> 2;
3976	eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3977	memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3978	memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
3979
3980	return 0;
3981
3982}
3983
3984#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
3985int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
3986			   struct mlx4_vhcr *vhcr,
3987			   struct mlx4_cmd_mailbox *inbox,
3988			   struct mlx4_cmd_mailbox *outbox,
3989			   struct mlx4_cmd_info *cmd_info)
3990{
3991	int err;
3992	u32 qpn = vhcr->in_modifier & 0xffffff;
3993	struct res_qp *rqp;
3994	u64 mac;
3995	unsigned port;
3996	u64 pri_addr_path_mask;
3997	struct mlx4_update_qp_context *cmd;
3998	int smac_index;
3999
4000	cmd = (struct mlx4_update_qp_context *)inbox->buf;
4001
4002	pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4003	if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4004	    (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4005		return -EPERM;
4006
4007	/* Just change the smac for the QP */
4008	err = get_res(dev, slave, qpn, RES_QP, &rqp);
4009	if (err) {
4010		mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4011		return err;
4012	}
4013
4014	port = (rqp->sched_queue >> 6 & 1) + 1;
4015
4016	if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4017		smac_index = cmd->qp_context.pri_path.grh_mylmc;
4018		err = mac_find_smac_ix_in_slave(dev, slave, port,
4019						smac_index, &mac);
4020
4021		if (err) {
4022			mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4023				 qpn, smac_index);
4024			goto err_mac;
4025		}
4026	}
4027
4028	err = mlx4_cmd(dev, inbox->dma,
4029		       vhcr->in_modifier, 0,
4030		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4031		       MLX4_CMD_NATIVE);
4032	if (err) {
4033		mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
4034		goto err_mac;
4035	}
4036
4037err_mac:
4038	put_res(dev, slave, qpn, RES_QP);
4039	return err;
4040}
4041
4042int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4043					 struct mlx4_vhcr *vhcr,
4044					 struct mlx4_cmd_mailbox *inbox,
4045					 struct mlx4_cmd_mailbox *outbox,
4046					 struct mlx4_cmd_info *cmd)
4047{
4048
4049	struct mlx4_priv *priv = mlx4_priv(dev);
4050	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4051	struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4052	int err;
4053	int qpn;
4054	struct res_qp *rqp;
4055	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4056	struct _rule_hw  *rule_header;
4057	int header_id;
4058
4059	if (dev->caps.steering_mode !=
4060	    MLX4_STEERING_MODE_DEVICE_MANAGED)
4061		return -EOPNOTSUPP;
4062
4063	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4064	ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port);
4065	if (ctrl->port <= 0)
4066		return -EINVAL;
4067	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4068	err = get_res(dev, slave, qpn, RES_QP, &rqp);
4069	if (err) {
4070		pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4071		return err;
4072	}
4073	rule_header = (struct _rule_hw *)(ctrl + 1);
4074	header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4075
4076	switch (header_id) {
4077	case MLX4_NET_TRANS_RULE_ID_ETH:
4078		if (validate_eth_header_mac(slave, rule_header, rlist)) {
4079			err = -EINVAL;
4080			goto err_put;
4081		}
4082		break;
4083	case MLX4_NET_TRANS_RULE_ID_IB:
4084		break;
4085	case MLX4_NET_TRANS_RULE_ID_IPV4:
4086	case MLX4_NET_TRANS_RULE_ID_TCP:
4087	case MLX4_NET_TRANS_RULE_ID_UDP:
4088		pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4089		if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4090			err = -EINVAL;
4091			goto err_put;
4092		}
4093		vhcr->in_modifier +=
4094			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4095		break;
4096	default:
4097		pr_err("Corrupted mailbox\n");
4098		err = -EINVAL;
4099		goto err_put;
4100	}
4101
4102	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4103			   vhcr->in_modifier, 0,
4104			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4105			   MLX4_CMD_NATIVE);
4106	if (err)
4107		goto err_put;
4108
4109	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4110	if (err) {
4111		mlx4_err(dev, "Fail to add flow steering resources\n");
4112		/* detach rule*/
4113		mlx4_cmd(dev, vhcr->out_param, 0, 0,
4114			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4115			 MLX4_CMD_NATIVE);
4116		goto err_put;
4117	}
4118	atomic_inc(&rqp->ref_count);
4119err_put:
4120	put_res(dev, slave, qpn, RES_QP);
4121	return err;
4122}
4123
4124int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4125					 struct mlx4_vhcr *vhcr,
4126					 struct mlx4_cmd_mailbox *inbox,
4127					 struct mlx4_cmd_mailbox *outbox,
4128					 struct mlx4_cmd_info *cmd)
4129{
4130	int err;
4131	struct res_qp *rqp;
4132	struct res_fs_rule *rrule;
4133
4134	if (dev->caps.steering_mode !=
4135	    MLX4_STEERING_MODE_DEVICE_MANAGED)
4136		return -EOPNOTSUPP;
4137
4138	err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4139	if (err)
4140		return err;
4141	/* Release the rule form busy state before removal */
4142	put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4143	err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
4144	if (err)
4145		return err;
4146
4147	err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4148	if (err) {
4149		mlx4_err(dev, "Fail to remove flow steering resources\n");
4150		goto out;
4151	}
4152
4153	err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4154		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4155		       MLX4_CMD_NATIVE);
4156	if (!err)
4157		atomic_dec(&rqp->ref_count);
4158out:
4159	put_res(dev, slave, rrule->qpn, RES_QP);
4160	return err;
4161}
4162
4163enum {
4164	BUSY_MAX_RETRIES = 10
4165};
4166
4167int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4168			       struct mlx4_vhcr *vhcr,
4169			       struct mlx4_cmd_mailbox *inbox,
4170			       struct mlx4_cmd_mailbox *outbox,
4171			       struct mlx4_cmd_info *cmd)
4172{
4173	int err;
4174	int index = vhcr->in_modifier & 0xffff;
4175
4176	err = get_res(dev, slave, index, RES_COUNTER, NULL);
4177	if (err)
4178		return err;
4179
4180	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4181	put_res(dev, slave, index, RES_COUNTER);
4182	return err;
4183}
4184
4185static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4186{
4187	struct res_gid *rgid;
4188	struct res_gid *tmp;
4189	struct mlx4_qp qp; /* dummy for calling attach/detach */
4190
4191	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4192		switch (dev->caps.steering_mode) {
4193		case MLX4_STEERING_MODE_DEVICE_MANAGED:
4194			mlx4_flow_detach(dev, rgid->reg_id);
4195			break;
4196		case MLX4_STEERING_MODE_B0:
4197			qp.qpn = rqp->local_qpn;
4198			(void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4199						     rgid->prot, rgid->steer);
4200			break;
4201		}
4202		list_del(&rgid->list);
4203		kfree(rgid);
4204	}
4205}
4206
4207static int _move_all_busy(struct mlx4_dev *dev, int slave,
4208			  enum mlx4_resource type, int print)
4209{
4210	struct mlx4_priv *priv = mlx4_priv(dev);
4211	struct mlx4_resource_tracker *tracker =
4212		&priv->mfunc.master.res_tracker;
4213	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4214	struct res_common *r;
4215	struct res_common *tmp;
4216	int busy;
4217
4218	busy = 0;
4219	spin_lock_irq(mlx4_tlock(dev));
4220	list_for_each_entry_safe(r, tmp, rlist, list) {
4221		if (r->owner == slave) {
4222			if (!r->removing) {
4223				if (r->state == RES_ANY_BUSY) {
4224					if (print)
4225						mlx4_dbg(dev,
4226							 "%s id 0x%llx is busy\n",
4227							  resource_str(type),
4228							  r->res_id);
4229					++busy;
4230				} else {
4231					r->from_state = r->state;
4232					r->state = RES_ANY_BUSY;
4233					r->removing = 1;
4234				}
4235			}
4236		}
4237	}
4238	spin_unlock_irq(mlx4_tlock(dev));
4239
4240	return busy;
4241}
4242
4243static int move_all_busy(struct mlx4_dev *dev, int slave,
4244			 enum mlx4_resource type)
4245{
4246	unsigned long begin;
4247	int busy;
4248
4249	begin = jiffies;
4250	do {
4251		busy = _move_all_busy(dev, slave, type, 0);
4252		if (time_after(jiffies, begin + 5 * HZ))
4253			break;
4254		if (busy)
4255			cond_resched();
4256	} while (busy);
4257
4258	if (busy)
4259		busy = _move_all_busy(dev, slave, type, 1);
4260
4261	return busy;
4262}
4263static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4264{
4265	struct mlx4_priv *priv = mlx4_priv(dev);
4266	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4267	struct list_head *qp_list =
4268		&tracker->slave_list[slave].res_list[RES_QP];
4269	struct res_qp *qp;
4270	struct res_qp *tmp;
4271	int state;
4272	u64 in_param;
4273	int qpn;
4274	int err;
4275
4276	err = move_all_busy(dev, slave, RES_QP);
4277	if (err)
4278		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4279			  slave);
4280
4281	spin_lock_irq(mlx4_tlock(dev));
4282	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4283		spin_unlock_irq(mlx4_tlock(dev));
4284		if (qp->com.owner == slave) {
4285			qpn = qp->com.res_id;
4286			detach_qp(dev, slave, qp);
4287			state = qp->com.from_state;
4288			while (state != 0) {
4289				switch (state) {
4290				case RES_QP_RESERVED:
4291					spin_lock_irq(mlx4_tlock(dev));
4292					rb_erase(&qp->com.node,
4293						 &tracker->res_tree[RES_QP]);
4294					list_del(&qp->com.list);
4295					spin_unlock_irq(mlx4_tlock(dev));
4296					if (!valid_reserved(dev, slave, qpn)) {
4297						__mlx4_qp_release_range(dev, qpn, 1);
4298						mlx4_release_resource(dev, slave,
4299								      RES_QP, 1, 0);
4300					}
4301					kfree(qp);
4302					state = 0;
4303					break;
4304				case RES_QP_MAPPED:
4305					if (!valid_reserved(dev, slave, qpn))
4306						__mlx4_qp_free_icm(dev, qpn);
4307					state = RES_QP_RESERVED;
4308					break;
4309				case RES_QP_HW:
4310					in_param = slave;
4311					err = mlx4_cmd(dev, in_param,
4312						       qp->local_qpn, 2,
4313						       MLX4_CMD_2RST_QP,
4314						       MLX4_CMD_TIME_CLASS_A,
4315						       MLX4_CMD_NATIVE);
4316					if (err)
4317						mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4318							 slave, qp->local_qpn);
4319					atomic_dec(&qp->rcq->ref_count);
4320					atomic_dec(&qp->scq->ref_count);
4321					atomic_dec(&qp->mtt->ref_count);
4322					if (qp->srq)
4323						atomic_dec(&qp->srq->ref_count);
4324					state = RES_QP_MAPPED;
4325					break;
4326				default:
4327					state = 0;
4328				}
4329			}
4330		}
4331		spin_lock_irq(mlx4_tlock(dev));
4332	}
4333	spin_unlock_irq(mlx4_tlock(dev));
4334}
4335
4336static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4337{
4338	struct mlx4_priv *priv = mlx4_priv(dev);
4339	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4340	struct list_head *srq_list =
4341		&tracker->slave_list[slave].res_list[RES_SRQ];
4342	struct res_srq *srq;
4343	struct res_srq *tmp;
4344	int state;
4345	u64 in_param;
4346	LIST_HEAD(tlist);
4347	int srqn;
4348	int err;
4349
4350	err = move_all_busy(dev, slave, RES_SRQ);
4351	if (err)
4352		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4353			  slave);
4354
4355	spin_lock_irq(mlx4_tlock(dev));
4356	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4357		spin_unlock_irq(mlx4_tlock(dev));
4358		if (srq->com.owner == slave) {
4359			srqn = srq->com.res_id;
4360			state = srq->com.from_state;
4361			while (state != 0) {
4362				switch (state) {
4363				case RES_SRQ_ALLOCATED:
4364					__mlx4_srq_free_icm(dev, srqn);
4365					spin_lock_irq(mlx4_tlock(dev));
4366					rb_erase(&srq->com.node,
4367						 &tracker->res_tree[RES_SRQ]);
4368					list_del(&srq->com.list);
4369					spin_unlock_irq(mlx4_tlock(dev));
4370					mlx4_release_resource(dev, slave,
4371							      RES_SRQ, 1, 0);
4372					kfree(srq);
4373					state = 0;
4374					break;
4375
4376				case RES_SRQ_HW:
4377					in_param = slave;
4378					err = mlx4_cmd(dev, in_param, srqn, 1,
4379						       MLX4_CMD_HW2SW_SRQ,
4380						       MLX4_CMD_TIME_CLASS_A,
4381						       MLX4_CMD_NATIVE);
4382					if (err)
4383						mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4384							 slave, srqn);
4385
4386					atomic_dec(&srq->mtt->ref_count);
4387					if (srq->cq)
4388						atomic_dec(&srq->cq->ref_count);
4389					state = RES_SRQ_ALLOCATED;
4390					break;
4391
4392				default:
4393					state = 0;
4394				}
4395			}
4396		}
4397		spin_lock_irq(mlx4_tlock(dev));
4398	}
4399	spin_unlock_irq(mlx4_tlock(dev));
4400}
4401
4402static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4403{
4404	struct mlx4_priv *priv = mlx4_priv(dev);
4405	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4406	struct list_head *cq_list =
4407		&tracker->slave_list[slave].res_list[RES_CQ];
4408	struct res_cq *cq;
4409	struct res_cq *tmp;
4410	int state;
4411	u64 in_param;
4412	LIST_HEAD(tlist);
4413	int cqn;
4414	int err;
4415
4416	err = move_all_busy(dev, slave, RES_CQ);
4417	if (err)
4418		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4419			  slave);
4420
4421	spin_lock_irq(mlx4_tlock(dev));
4422	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4423		spin_unlock_irq(mlx4_tlock(dev));
4424		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4425			cqn = cq->com.res_id;
4426			state = cq->com.from_state;
4427			while (state != 0) {
4428				switch (state) {
4429				case RES_CQ_ALLOCATED:
4430					__mlx4_cq_free_icm(dev, cqn);
4431					spin_lock_irq(mlx4_tlock(dev));
4432					rb_erase(&cq->com.node,
4433						 &tracker->res_tree[RES_CQ]);
4434					list_del(&cq->com.list);
4435					spin_unlock_irq(mlx4_tlock(dev));
4436					mlx4_release_resource(dev, slave,
4437							      RES_CQ, 1, 0);
4438					kfree(cq);
4439					state = 0;
4440					break;
4441
4442				case RES_CQ_HW:
4443					in_param = slave;
4444					err = mlx4_cmd(dev, in_param, cqn, 1,
4445						       MLX4_CMD_HW2SW_CQ,
4446						       MLX4_CMD_TIME_CLASS_A,
4447						       MLX4_CMD_NATIVE);
4448					if (err)
4449						mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4450							 slave, cqn);
4451					atomic_dec(&cq->mtt->ref_count);
4452					state = RES_CQ_ALLOCATED;
4453					break;
4454
4455				default:
4456					state = 0;
4457				}
4458			}
4459		}
4460		spin_lock_irq(mlx4_tlock(dev));
4461	}
4462	spin_unlock_irq(mlx4_tlock(dev));
4463}
4464
4465static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4466{
4467	struct mlx4_priv *priv = mlx4_priv(dev);
4468	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4469	struct list_head *mpt_list =
4470		&tracker->slave_list[slave].res_list[RES_MPT];
4471	struct res_mpt *mpt;
4472	struct res_mpt *tmp;
4473	int state;
4474	u64 in_param;
4475	LIST_HEAD(tlist);
4476	int mptn;
4477	int err;
4478
4479	err = move_all_busy(dev, slave, RES_MPT);
4480	if (err)
4481		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4482			  slave);
4483
4484	spin_lock_irq(mlx4_tlock(dev));
4485	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4486		spin_unlock_irq(mlx4_tlock(dev));
4487		if (mpt->com.owner == slave) {
4488			mptn = mpt->com.res_id;
4489			state = mpt->com.from_state;
4490			while (state != 0) {
4491				switch (state) {
4492				case RES_MPT_RESERVED:
4493					__mlx4_mpt_release(dev, mpt->key);
4494					spin_lock_irq(mlx4_tlock(dev));
4495					rb_erase(&mpt->com.node,
4496						 &tracker->res_tree[RES_MPT]);
4497					list_del(&mpt->com.list);
4498					spin_unlock_irq(mlx4_tlock(dev));
4499					mlx4_release_resource(dev, slave,
4500							      RES_MPT, 1, 0);
4501					kfree(mpt);
4502					state = 0;
4503					break;
4504
4505				case RES_MPT_MAPPED:
4506					__mlx4_mpt_free_icm(dev, mpt->key);
4507					state = RES_MPT_RESERVED;
4508					break;
4509
4510				case RES_MPT_HW:
4511					in_param = slave;
4512					err = mlx4_cmd(dev, in_param, mptn, 0,
4513						     MLX4_CMD_HW2SW_MPT,
4514						     MLX4_CMD_TIME_CLASS_A,
4515						     MLX4_CMD_NATIVE);
4516					if (err)
4517						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4518							 slave, mptn);
4519					if (mpt->mtt)
4520						atomic_dec(&mpt->mtt->ref_count);
4521					state = RES_MPT_MAPPED;
4522					break;
4523				default:
4524					state = 0;
4525				}
4526			}
4527		}
4528		spin_lock_irq(mlx4_tlock(dev));
4529	}
4530	spin_unlock_irq(mlx4_tlock(dev));
4531}
4532
4533static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4534{
4535	struct mlx4_priv *priv = mlx4_priv(dev);
4536	struct mlx4_resource_tracker *tracker =
4537		&priv->mfunc.master.res_tracker;
4538	struct list_head *mtt_list =
4539		&tracker->slave_list[slave].res_list[RES_MTT];
4540	struct res_mtt *mtt;
4541	struct res_mtt *tmp;
4542	int state;
4543	LIST_HEAD(tlist);
4544	int base;
4545	int err;
4546
4547	err = move_all_busy(dev, slave, RES_MTT);
4548	if (err)
4549		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts  - too busy for slave %d\n",
4550			  slave);
4551
4552	spin_lock_irq(mlx4_tlock(dev));
4553	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4554		spin_unlock_irq(mlx4_tlock(dev));
4555		if (mtt->com.owner == slave) {
4556			base = mtt->com.res_id;
4557			state = mtt->com.from_state;
4558			while (state != 0) {
4559				switch (state) {
4560				case RES_MTT_ALLOCATED:
4561					__mlx4_free_mtt_range(dev, base,
4562							      mtt->order);
4563					spin_lock_irq(mlx4_tlock(dev));
4564					rb_erase(&mtt->com.node,
4565						 &tracker->res_tree[RES_MTT]);
4566					list_del(&mtt->com.list);
4567					spin_unlock_irq(mlx4_tlock(dev));
4568					mlx4_release_resource(dev, slave, RES_MTT,
4569							      1 << mtt->order, 0);
4570					kfree(mtt);
4571					state = 0;
4572					break;
4573
4574				default:
4575					state = 0;
4576				}
4577			}
4578		}
4579		spin_lock_irq(mlx4_tlock(dev));
4580	}
4581	spin_unlock_irq(mlx4_tlock(dev));
4582}
4583
4584static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
4585{
4586	struct mlx4_priv *priv = mlx4_priv(dev);
4587	struct mlx4_resource_tracker *tracker =
4588		&priv->mfunc.master.res_tracker;
4589	struct list_head *fs_rule_list =
4590		&tracker->slave_list[slave].res_list[RES_FS_RULE];
4591	struct res_fs_rule *fs_rule;
4592	struct res_fs_rule *tmp;
4593	int state;
4594	u64 base;
4595	int err;
4596
4597	err = move_all_busy(dev, slave, RES_FS_RULE);
4598	if (err)
4599		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n",
4600			  slave);
4601
4602	spin_lock_irq(mlx4_tlock(dev));
4603	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
4604		spin_unlock_irq(mlx4_tlock(dev));
4605		if (fs_rule->com.owner == slave) {
4606			base = fs_rule->com.res_id;
4607			state = fs_rule->com.from_state;
4608			while (state != 0) {
4609				switch (state) {
4610				case RES_FS_RULE_ALLOCATED:
4611					/* detach rule */
4612					err = mlx4_cmd(dev, base, 0, 0,
4613						       MLX4_QP_FLOW_STEERING_DETACH,
4614						       MLX4_CMD_TIME_CLASS_A,
4615						       MLX4_CMD_NATIVE);
4616
4617					spin_lock_irq(mlx4_tlock(dev));
4618					rb_erase(&fs_rule->com.node,
4619						 &tracker->res_tree[RES_FS_RULE]);
4620					list_del(&fs_rule->com.list);
4621					spin_unlock_irq(mlx4_tlock(dev));
4622					kfree(fs_rule);
4623					state = 0;
4624					break;
4625
4626				default:
4627					state = 0;
4628				}
4629			}
4630		}
4631		spin_lock_irq(mlx4_tlock(dev));
4632	}
4633	spin_unlock_irq(mlx4_tlock(dev));
4634}
4635
4636static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4637{
4638	struct mlx4_priv *priv = mlx4_priv(dev);
4639	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4640	struct list_head *eq_list =
4641		&tracker->slave_list[slave].res_list[RES_EQ];
4642	struct res_eq *eq;
4643	struct res_eq *tmp;
4644	int err;
4645	int state;
4646	LIST_HEAD(tlist);
4647	int eqn;
4648	struct mlx4_cmd_mailbox *mailbox;
4649
4650	err = move_all_busy(dev, slave, RES_EQ);
4651	if (err)
4652		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
4653			  slave);
4654
4655	spin_lock_irq(mlx4_tlock(dev));
4656	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
4657		spin_unlock_irq(mlx4_tlock(dev));
4658		if (eq->com.owner == slave) {
4659			eqn = eq->com.res_id;
4660			state = eq->com.from_state;
4661			while (state != 0) {
4662				switch (state) {
4663				case RES_EQ_RESERVED:
4664					spin_lock_irq(mlx4_tlock(dev));
4665					rb_erase(&eq->com.node,
4666						 &tracker->res_tree[RES_EQ]);
4667					list_del(&eq->com.list);
4668					spin_unlock_irq(mlx4_tlock(dev));
4669					kfree(eq);
4670					state = 0;
4671					break;
4672
4673				case RES_EQ_HW:
4674					mailbox = mlx4_alloc_cmd_mailbox(dev);
4675					if (IS_ERR(mailbox)) {
4676						cond_resched();
4677						continue;
4678					}
4679					err = mlx4_cmd_box(dev, slave, 0,
4680							   eqn & 0xff, 0,
4681							   MLX4_CMD_HW2SW_EQ,
4682							   MLX4_CMD_TIME_CLASS_A,
4683							   MLX4_CMD_NATIVE);
4684					if (err)
4685						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
4686							 slave, eqn);
4687					mlx4_free_cmd_mailbox(dev, mailbox);
4688					atomic_dec(&eq->mtt->ref_count);
4689					state = RES_EQ_RESERVED;
4690					break;
4691
4692				default:
4693					state = 0;
4694				}
4695			}
4696		}
4697		spin_lock_irq(mlx4_tlock(dev));
4698	}
4699	spin_unlock_irq(mlx4_tlock(dev));
4700}
4701
4702static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4703{
4704	struct mlx4_priv *priv = mlx4_priv(dev);
4705	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4706	struct list_head *counter_list =
4707		&tracker->slave_list[slave].res_list[RES_COUNTER];
4708	struct res_counter *counter;
4709	struct res_counter *tmp;
4710	int err;
4711	int index;
4712
4713	err = move_all_busy(dev, slave, RES_COUNTER);
4714	if (err)
4715		mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
4716			  slave);
4717
4718	spin_lock_irq(mlx4_tlock(dev));
4719	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
4720		if (counter->com.owner == slave) {
4721			index = counter->com.res_id;
4722			rb_erase(&counter->com.node,
4723				 &tracker->res_tree[RES_COUNTER]);
4724			list_del(&counter->com.list);
4725			kfree(counter);
4726			__mlx4_counter_free(dev, index);
4727			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
4728		}
4729	}
4730	spin_unlock_irq(mlx4_tlock(dev));
4731}
4732
4733static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4734{
4735	struct mlx4_priv *priv = mlx4_priv(dev);
4736	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4737	struct list_head *xrcdn_list =
4738		&tracker->slave_list[slave].res_list[RES_XRCD];
4739	struct res_xrcdn *xrcd;
4740	struct res_xrcdn *tmp;
4741	int err;
4742	int xrcdn;
4743
4744	err = move_all_busy(dev, slave, RES_XRCD);
4745	if (err)
4746		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
4747			  slave);
4748
4749	spin_lock_irq(mlx4_tlock(dev));
4750	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
4751		if (xrcd->com.owner == slave) {
4752			xrcdn = xrcd->com.res_id;
4753			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
4754			list_del(&xrcd->com.list);
4755			kfree(xrcd);
4756			__mlx4_xrcd_free(dev, xrcdn);
4757		}
4758	}
4759	spin_unlock_irq(mlx4_tlock(dev));
4760}
4761
4762void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
4763{
4764	struct mlx4_priv *priv = mlx4_priv(dev);
4765	mlx4_reset_roce_gids(dev, slave);
4766	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4767	rem_slave_vlans(dev, slave);
4768	rem_slave_macs(dev, slave);
4769	rem_slave_fs_rule(dev, slave);
4770	rem_slave_qps(dev, slave);
4771	rem_slave_srqs(dev, slave);
4772	rem_slave_cqs(dev, slave);
4773	rem_slave_mrs(dev, slave);
4774	rem_slave_eqs(dev, slave);
4775	rem_slave_mtts(dev, slave);
4776	rem_slave_counters(dev, slave);
4777	rem_slave_xrcdns(dev, slave);
4778	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4779}
4780
4781void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
4782{
4783	struct mlx4_vf_immed_vlan_work *work =
4784		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
4785	struct mlx4_cmd_mailbox *mailbox;
4786	struct mlx4_update_qp_context *upd_context;
4787	struct mlx4_dev *dev = &work->priv->dev;
4788	struct mlx4_resource_tracker *tracker =
4789		&work->priv->mfunc.master.res_tracker;
4790	struct list_head *qp_list =
4791		&tracker->slave_list[work->slave].res_list[RES_QP];
4792	struct res_qp *qp;
4793	struct res_qp *tmp;
4794	u64 qp_path_mask_vlan_ctrl =
4795		       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
4796		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
4797		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
4798		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
4799		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
4800		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
4801
4802	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
4803		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
4804		       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
4805		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
4806		       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
4807		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
4808		       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
4809
4810	int err;
4811	int port, errors = 0;
4812	u8 vlan_control;
4813
4814	if (mlx4_is_slave(dev)) {
4815		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
4816			  work->slave);
4817		goto out;
4818	}
4819
4820	mailbox = mlx4_alloc_cmd_mailbox(dev);
4821	if (IS_ERR(mailbox))
4822		goto out;
4823	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
4824		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4825			MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
4826			MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
4827			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4828			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
4829			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4830	else if (!work->vlan_id)
4831		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4832			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4833	else
4834		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4835			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4836			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
4837
4838	upd_context = mailbox->buf;
4839	upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
4840
4841	spin_lock_irq(mlx4_tlock(dev));
4842	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4843		spin_unlock_irq(mlx4_tlock(dev));
4844		if (qp->com.owner == work->slave) {
4845			if (qp->com.from_state != RES_QP_HW ||
4846			    !qp->sched_queue ||  /* no INIT2RTR trans yet */
4847			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
4848			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
4849				spin_lock_irq(mlx4_tlock(dev));
4850				continue;
4851			}
4852			port = (qp->sched_queue >> 6 & 1) + 1;
4853			if (port != work->port) {
4854				spin_lock_irq(mlx4_tlock(dev));
4855				continue;
4856			}
4857			if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
4858				upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
4859			else
4860				upd_context->primary_addr_path_mask =
4861					cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
4862			if (work->vlan_id == MLX4_VGT) {
4863				upd_context->qp_context.param3 = qp->param3;
4864				upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
4865				upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
4866				upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
4867				upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
4868				upd_context->qp_context.pri_path.feup = qp->feup;
4869				upd_context->qp_context.pri_path.sched_queue =
4870					qp->sched_queue;
4871			} else {
4872				upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
4873				upd_context->qp_context.pri_path.vlan_control = vlan_control;
4874				upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
4875				upd_context->qp_context.pri_path.fvl_rx =
4876					qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
4877				upd_context->qp_context.pri_path.fl =
4878					qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
4879				upd_context->qp_context.pri_path.feup =
4880					qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
4881				upd_context->qp_context.pri_path.sched_queue =
4882					qp->sched_queue & 0xC7;
4883				upd_context->qp_context.pri_path.sched_queue |=
4884					((work->qos & 0x7) << 3);
4885			}
4886
4887			err = mlx4_cmd(dev, mailbox->dma,
4888				       qp->local_qpn & 0xffffff,
4889				       0, MLX4_CMD_UPDATE_QP,
4890				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
4891			if (err) {
4892				mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
4893					  work->slave, port, qp->local_qpn, err);
4894				errors++;
4895			}
4896		}
4897		spin_lock_irq(mlx4_tlock(dev));
4898	}
4899	spin_unlock_irq(mlx4_tlock(dev));
4900	mlx4_free_cmd_mailbox(dev, mailbox);
4901
4902	if (errors)
4903		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
4904			 errors, work->slave, work->port);
4905
4906	/* unregister previous vlan_id if needed and we had no errors
4907	 * while updating the QPs
4908	 */
4909	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
4910	    NO_INDX != work->orig_vlan_ix)
4911		__mlx4_unregister_vlan(&work->priv->dev, work->port,
4912				       work->orig_vlan_id);
4913out:
4914	kfree(work);
4915	return;
4916}
4917