/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/export.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"

#define MLX4_MAC_VALID		(1ull << 63)
#define MLX4_MAC_MASK		0xffffffffffffULL

#define MLX4_VLAN_VALID		(1u << 31)
#define MLX4_VLAN_MASK		0xfff

#define MLX4_STATS_TRAFFIC_COUNTERS_MASK	0xfULL
#define MLX4_STATS_TRAFFIC_DROPS_MASK		0xc0ULL
#define MLX4_STATS_ERROR_COUNTERS_MASK		0x1ffc30ULL
#define MLX4_STATS_PORT_COUNTERS_MASK		0x1fe00000ULL

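/*
 * Shadow table setup.  Both tables start out empty; "max" is derived from
 * the device capabilities (log_num_macs / log_num_vlans), with the VLAN
 * table keeping the first MLX4_VLAN_REGULAR entries reserved for special
 * indices.
 */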
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
{
	int i;

	mutex_init(&table->mutex);
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		table->entries[i] = 0;
		table->refs[i]	 = 0;
	}
	table->max   = 1 << dev->caps.log_num_macs;
	table->total = 0;
}

void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
{
	int i;

	mutex_init(&table->mutex);
	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
		table->entries[i] = 0;
		table->refs[i]	 = 0;
	}
	table->max   = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR;
	table->total = 0;
}

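/*
 * Attach @qpn to the unicast steering entry for @mac on @port.  The MAC is
 * encoded into the last six bytes of a 16-byte GID (gid[5] carries the port
 * number) before being handed to the common unicast attach path.
 */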
static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
{
	struct mlx4_qp qp;
	u8 gid[16] = {0};
	__be64 be_mac;
	int err;

	qp.qpn = *qpn;

	mac &= MLX4_MAC_MASK;
	be_mac = cpu_to_be64(mac << 16);
	memcpy(&gid[10], &be_mac, ETH_ALEN);
	gid[5] = port;

	err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
	if (err)
		mlx4_warn(dev, "Failed attaching unicast steering entry\n");

	return err;
}

static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
				  u64 mac, int qpn)
{
	struct mlx4_qp qp;
	u8 gid[16] = {0};
	__be64 be_mac;

	qp.qpn = qpn;
	mac &= MLX4_MAC_MASK;
	be_mac = cpu_to_be64(mac << 16);
	memcpy(&gid[10], &be_mac, ETH_ALEN);
	gid[5] = port;

	mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
}

static int validate_index(struct mlx4_dev *dev,
			  struct mlx4_mac_table *table, int index)
{
	int err = 0;

	if (index < 0 || index >= table->max || !table->entries[index]) {
		mlx4_warn(dev, "No valid MAC entry for the given index\n");
		err = -EINVAL;
	}
	return err;
}

static int find_index(struct mlx4_dev *dev,
		      struct mlx4_mac_table *table, u64 mac)
{
	int i;

	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if ((mac & MLX4_MAC_MASK) ==
		    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
			return i;
	}
	/* MAC not found */
	return -EINVAL;
}

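/*
 * Acquire a QP for an Ethernet MAC.  The MAC is first written to the port
 * MAC table; on devices without VEP unicast steering the QP number is simply
 * base_qpn + table index.  Otherwise a range of one QP is reserved, a
 * steering entry is attached, and the (qpn -> mac) mapping is remembered in
 * the per-port radix tree so it can be undone later.
 */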
int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_entry *entry;
	int index = 0;
	int err = 0;

	mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n",
		 (unsigned long long) mac);
	index = mlx4_register_mac(dev, port, mac);
	if (index < 0) {
		err = index;
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) mac);
		return err;
	}

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) {
		*qpn = info->base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
	mlx4_dbg(dev, "Reserved qp %d\n", *qpn);
	if (err) {
		mlx4_err(dev, "Failed to reserve qp for mac registration\n");
		goto qp_err;
	}

	err = mlx4_uc_steer_add(dev, port, mac, qpn);
	if (err)
		goto steer_err;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto alloc_err;
	}
	entry->mac = mac;
	err = radix_tree_insert(&info->mac_tree, *qpn, entry);
	if (err)
		goto insert_err;
	return 0;

insert_err:
	kfree(entry);

alloc_err:
	mlx4_uc_steer_release(dev, port, mac, *qpn);

steer_err:
	mlx4_qp_release_range(dev, *qpn, 1);

qp_err:
	mlx4_unregister_mac(dev, port, mac);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_get_eth_qp);

void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_entry *entry;

	mlx4_dbg(dev, "Unregistering MAC: 0x%llx\n",
		 (unsigned long long) mac);
	mlx4_unregister_mac(dev, port, mac);

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
		entry = radix_tree_lookup(&info->mac_tree, qpn);
		if (entry) {
			mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx,"
				 " qpn %d\n", port,
				 (unsigned long long) mac, qpn);
			mlx4_uc_steer_release(dev, port, entry->mac, qpn);
			mlx4_qp_release_range(dev, qpn, 1);
			radix_tree_delete(&info->mac_tree, qpn);
			kfree(entry);
		}
	}
}
EXPORT_SYMBOL_GPL(mlx4_put_eth_qp);

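/*
 * Push the shadow MAC table to firmware via SET_PORT with the
 * MLX4_SET_PORT_MAC_TABLE input modifier.
 */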
static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
				   __be64 *entries)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_mod;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);

	in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;

	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

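/*
 * Native (non-wrapped) MAC registration: scan the shadow table for a free
 * slot, refuse duplicates, program the firmware, and return the table index
 * on success or a negative errno on failure.
 */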
int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int i, err = 0;
	int free = -1;

	mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n",
		 (unsigned long long) mac, port);

	mutex_lock(&table->mutex);
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (free < 0 && !table->entries[i]) {
			free = i;
			continue;
		}

		if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
			/* MAC already registered, must not have duplicates */
			err = -EEXIST;
			goto out;
		}
	}

	mlx4_dbg(dev, "Free MAC index is %d\n", free);

	if (table->total == table->max) {
		/* No free MAC entries */
		err = -ENOSPC;
		goto out;
	}

	/* Register new MAC */
	table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);

	err = mlx4_set_port_mac_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) mac);
		table->entries[free] = 0;
		goto out;
	}

	err = free;
	++table->total;
out:
	mutex_unlock(&table->mutex);
	return err;
}
EXPORT_SYMBOL_GPL(__mlx4_register_mac);

int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&out_param, port);
		err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			return err;

		return get_param_l(&out_param);
	}
	return __mlx4_register_mac(dev, port, mac);
}
EXPORT_SYMBOL_GPL(mlx4_register_mac);

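/*
 * Native MAC removal: clear the table entry that matches @mac and rewrite
 * the firmware table.  If the MAC is not present, validate_index() warns
 * and nothing is changed.
 */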
void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int index;

	index = find_index(dev, table, mac);

	mutex_lock(&table->mutex);

	if (validate_index(dev, table, index))
		goto out;

	table->entries[index] = 0;
	mlx4_set_port_mac_table(dev, port, table->entries);
	--table->total;
out:
	mutex_unlock(&table->mutex);
}
EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);

void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&out_param, port);
		err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		return;
	}
	__mlx4_unregister_mac(dev, port, mac);
}
EXPORT_SYMBOL_GPL(mlx4_unregister_mac);

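/*
 * Replace the MAC bound to @qpn in place.  With VEP unicast steering the
 * old steering entry and MAC registration are torn down and rebuilt for the
 * new address; without it, the MAC table entry addressed by
 * (qpn - base_qpn) is overwritten directly.
 */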
int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	struct mlx4_mac_entry *entry;
	int index = qpn - info->base_qpn;
	int err = 0;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
		entry = radix_tree_lookup(&info->mac_tree, qpn);
		if (!entry)
			return -EINVAL;
		mlx4_uc_steer_release(dev, port, entry->mac, qpn);
		mlx4_unregister_mac(dev, port, entry->mac);
		entry->mac = new_mac;
		mlx4_register_mac(dev, port, new_mac);
		err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn);
		return err;
	}

	/* CX1 doesn't support multi-functions */
	mutex_lock(&table->mutex);

	err = validate_index(dev, table, index);
	if (err)
		goto out;

	table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);

	err = mlx4_set_port_mac_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) new_mac);
		table->entries[index] = 0;
	}
out:
	mutex_unlock(&table->mutex);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_replace_mac);

static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
				    __be32 *entries)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_mod;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
	in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}

int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int i;

	for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
		if (table->refs[i] &&
		    (vid == (MLX4_VLAN_MASK &
			      be32_to_cpu(table->entries[i])))) {
			/* VLAN already registered, return its index */
			*idx = i;
			return 0;
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);

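/*
 * Native VLAN registration.  Entries are reference counted, and indices
 * below MLX4_VLAN_REGULAR are reserved for the special entries used below
 * (MLX4_NO_VLAN_IDX, MLX4_VLAN_MISS_IDX), so the search for a free slot
 * starts at MLX4_VLAN_REGULAR.
 */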
static int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
				int *index)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int i, err = 0;
	int free = -1;

	mutex_lock(&table->mutex);

	if (table->total == table->max) {
		/* No free vlan entries */
		err = -ENOSPC;
		goto out;
	}

	for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
		if (free < 0 && (table->refs[i] == 0)) {
			free = i;
			continue;
		}

		if (table->refs[i] &&
		    (vlan == (MLX4_VLAN_MASK &
			      be32_to_cpu(table->entries[i])))) {
			/* VLAN already registered, increase reference count */
			*index = i;
			++table->refs[i];
			goto out;
		}
	}

	if (free < 0) {
		err = -ENOMEM;
		goto out;
	}

	/* Register new VLAN */
	table->refs[free] = 1;
	table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);

	err = mlx4_set_port_vlan_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
		table->refs[free] = 0;
		table->entries[free] = 0;
		goto out;
	}

	*index = free;
	++table->total;
out:
	mutex_unlock(&table->mutex);
	return err;
}

int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&out_param, port);
		err = mlx4_cmd_imm(dev, vlan, &out_param, RES_VLAN,
				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (!err)
			*index = get_param_l(&out_param);

		return err;
	}
	return __mlx4_register_vlan(dev, port, vlan, index);
}
EXPORT_SYMBOL_GPL(mlx4_register_vlan);

static void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;

	if (index < MLX4_VLAN_REGULAR) {
		mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
		return;
	}

	mutex_lock(&table->mutex);
	if (!table->refs[index]) {
		mlx4_warn(dev, "No vlan entry for index %d\n", index);
		goto out;
	}
	if (--table->refs[index]) {
		mlx4_dbg(dev, "Have more references for index %d, "
			 "no need to modify vlan table\n", index);
		goto out;
	}
	table->entries[index] = 0;
	mlx4_set_port_vlan_table(dev, port, table->entries);
	--table->total;
out:
	mutex_unlock(&table->mutex);
}

void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
{
	u64 in_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, port);
		err = mlx4_cmd(dev, in_param, RES_VLAN, RES_OP_RESERVE_AND_MAP,
			       MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			       MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed freeing vlan at index %d\n",
				  index);

		return;
	}
	__mlx4_unregister_vlan(dev, port, index);
}
EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);

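/*
 * Query the IB capability mask of @port by issuing a MAD_IFC PortInfo
 * request through the command interface and extracting the capability mask
 * field from the response mailbox.
 */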
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	u8 *inbuf, *outbuf;
	int err;

	inmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);

	outmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	inbuf = inmailbox->buf;
	outbuf = outmailbox->buf;
	memset(inbuf, 0, 256);
	memset(outbuf, 0, 256);
	inbuf[0] = 1;	/* MAD base version */
	inbuf[1] = 1;	/* management class: subnet management */
	inbuf[2] = 1;	/* class version */
	inbuf[3] = 1;	/* method: Get */
	*(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015);	/* PortInfo attribute */
	*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);

	err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (!err)
		*caps = *(__be32 *) (outbuf + 84);	/* capability mask */
	mlx4_free_cmd_mailbox(dev, inmailbox);
	mlx4_free_cmd_mailbox(dev, outmailbox);
	return err;
}

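/*
 * Common SET_PORT handling for multi-function devices.  For Ethernet, only
 * the master function may use modifiers other than the general context;
 * slaves are limited to MTU changes, and the effective port MTU is kept as
 * the maximum requested across all functions.  For IB, the capability mask
 * programmed to firmware is the aggregate of all slave requests, and only
 * the QKey violation counter reset bit is honoured per request.
 */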
static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
				u8 op_mod, struct mlx4_cmd_mailbox *inbox)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_port_info *port_info;
	struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
	struct mlx4_slave_state *slave_st = &master->slave_state[slave];
	struct mlx4_set_port_rqp_calc_context *qpn_context;
	struct mlx4_set_port_general_context *gen_context;
	int reset_qkey_viols;
	int port;
	int is_eth;
	u32 in_modifier;
	u32 promisc;
	u16 mtu, prev_mtu;
	int err;
	int i;
	__be32 agg_cap_mask;
	__be32 slave_cap_mask;
	__be32 new_cap_mask;

	port = in_mod & 0xff;
	in_modifier = in_mod >> 8;
	is_eth = op_mod;
	port_info = &priv->port[port];

	/* Slaves cannot perform SET_PORT operations except changing MTU */
	if (is_eth) {
		if (slave != dev->caps.function &&
		    in_modifier != MLX4_SET_PORT_GENERAL) {
			mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
					slave);
			return -EINVAL;
		}
		switch (in_modifier) {
		case MLX4_SET_PORT_RQP_CALC:
			qpn_context = inbox->buf;
			qpn_context->base_qpn =
				cpu_to_be32(port_info->base_qpn);
			qpn_context->n_mac = 0x7;
			promisc = be32_to_cpu(qpn_context->promisc) >>
				SET_PORT_PROMISC_SHIFT;
			qpn_context->promisc = cpu_to_be32(
				promisc << SET_PORT_PROMISC_SHIFT |
				port_info->base_qpn);
			promisc = be32_to_cpu(qpn_context->mcast) >>
				SET_PORT_MC_PROMISC_SHIFT;
			qpn_context->mcast = cpu_to_be32(
				promisc << SET_PORT_MC_PROMISC_SHIFT |
				port_info->base_qpn);
			break;
		case MLX4_SET_PORT_GENERAL:
			gen_context = inbox->buf;
			/* The MTU is configured as the max MTU among all
			 * the functions on the port. */
			mtu = be16_to_cpu(gen_context->mtu);
			mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port]);
			prev_mtu = slave_st->mtu[port];
			slave_st->mtu[port] = mtu;
			if (mtu > master->max_mtu[port])
				master->max_mtu[port] = mtu;
			if (mtu < prev_mtu && prev_mtu ==
						master->max_mtu[port]) {
				slave_st->mtu[port] = mtu;
				master->max_mtu[port] = mtu;
				for (i = 0; i < dev->num_slaves; i++) {
					master->max_mtu[port] =
					max(master->max_mtu[port],
					    master->slave_state[i].mtu[port]);
				}
			}

			gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
			break;
		}
		return mlx4_cmd(dev, inbox->dma, in_mod, op_mod,
				MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_NATIVE);
	}

	/* For IB, we only consider:
	 * - The capability mask, which is set to the aggregate of all
	 *   slave function capabilities
	 * - The QKey violation counter - reset according to each request.
	 */

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40;
		new_cap_mask = ((__be32 *) inbox->buf)[2];
	} else {
		reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1;
		new_cap_mask = ((__be32 *) inbox->buf)[1];
	}

	agg_cap_mask = 0;
	slave_cap_mask =
		priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
	priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask;
	for (i = 0; i < dev->num_slaves; i++)
		agg_cap_mask |=
			priv->mfunc.master.slave_state[i].ib_cap_mask[port];

	/* Only clear the mailbox for guests.  The master may be setting
	 * MTU or PKEY table size.
	 */
	if (slave != dev->caps.function)
		memset(inbox->buf, 0, 256);
	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) inbox->buf	   = !!reset_qkey_viols << 6;
		((__be32 *) inbox->buf)[2] = agg_cap_mask;
	} else {
		((u8 *) inbox->buf)[3]     = !!reset_qkey_viols;
		((__be32 *) inbox->buf)[1] = agg_cap_mask;
	}

	err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err)
		priv->mfunc.master.slave_state[slave].ib_cap_mask[port] =
			slave_cap_mask;
	return err;
}

int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
				    vhcr->op_modifier, inbox);
}

/* bit locations for set port command with zero op modifier */
enum {
	MLX4_SET_PORT_VL_CAP	 = 4, /* bits 7:4 */
	MLX4_SET_PORT_MTU_CAP	 = 12, /* bits 15:12 */
	MLX4_CHANGE_PORT_VL_CAP	 = 21,
	MLX4_CHANGE_PORT_MTU_CAP = 22,
};

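/*
 * Program basic IB port capabilities (MTU cap and VL cap).  The command is
 * retried with progressively smaller VL cap values (8, 4, 2, 1) as long as
 * it fails with -ENOMEM.
 */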
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err, vl_cap;

	if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memset(mailbox->buf, 0, 256);

	((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];

	/* IB VL CAP enum isn't used by the firmware, just numerical values */
	for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) {
		((__be32 *) mailbox->buf)[0] = cpu_to_be32(
			(1 << MLX4_CHANGE_PORT_MTU_CAP) |
			(1 << MLX4_CHANGE_PORT_VL_CAP)  |
			(dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) |
			(vl_cap << MLX4_SET_PORT_VL_CAP));
		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
				MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
		if (err != -ENOMEM)
			break;
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

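/*
 * Configure the general port context: MTU plus pause/priority flow control.
 * Global pause (pptx/pprx) is only asserted when per-priority flow control
 * in the same direction (pfctx/pfcrx) is disabled.
 */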
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
			  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_general_context *context;
	int err;
	u32 in_mod;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;
	memset(context, 0, sizeof *context);

	context->flags = SET_PORT_GEN_ALL_VALID;
	context->mtu = cpu_to_be16(mtu);
	context->pptx = (pptx * (!pfctx)) << 7;
	context->pfctx = pfctx;
	context->pprx = (pprx * (!pfcrx)) << 7;
	context->pfcrx = pfcrx;

	in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_general);

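/*
 * Configure RX QP calculation for the port: base QP number, the MAC table
 * size exponent (n_mac), the unicast/multicast promiscuous QP settings, and
 * the special no-VLAN / VLAN-miss indices.  Devices with both VEP unicast
 * and multicast steering skip this entirely.
 */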
int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
			   u8 promisc)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_rqp_calc_context *context;
	int err;
	u32 in_mod;
	u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
		MCAST_DIRECT : MCAST_DEFAULT;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER &&
	    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;
	memset(context, 0, sizeof *context);

	context->base_qpn = cpu_to_be32(base_qpn);
	context->n_mac = dev->caps.log_num_macs;
	context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
				       base_qpn);
	context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
				     base_qpn);
	context->intra_no_vlan = 0;
	context->no_vlan = MLX4_NO_VLAN_IDX;
	context->intra_vlan_miss = 0;
	context->vlan_miss = MLX4_VLAN_MISS_IDX;

	in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);

int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	return 0;
}

int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
			u64 mac, u64 clear, u8 mode)
{
	return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
			MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);

int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	return 0;
}

int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
			       u32 in_mod, struct mlx4_cmd_mailbox *outbox)
{
	return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
			    MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
			    MLX4_CMD_NATIVE);
}

int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	if (slave != dev->caps.function)
		return 0;
	return mlx4_common_dump_eth_stats(dev, slave,
					  vhcr->in_modifier, outbox);
}

void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap)
{
	if (!mlx4_is_mfunc(dev)) {
		*stats_bitmap = 0;
		return;
	}

	*stats_bitmap = (MLX4_STATS_TRAFFIC_COUNTERS_MASK |
			 MLX4_STATS_TRAFFIC_DROPS_MASK |
			 MLX4_STATS_PORT_COUNTERS_MASK);

	if (mlx4_is_master(dev))
		*stats_bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK;
}
EXPORT_SYMBOL(mlx4_set_stats_bitmap);