en_ethtool.c revision 537fae0101c8853abb52136788173dde74b9d1e9
/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/kernel.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/mlx4/driver.h>
#include <linux/in.h>
#include <net/ip.h>

#include "mlx4_en.h"
#include "en_port.h"

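/* Bit 63 of the ethtool ring_cookie requests a direct QP attach (the low
 * bits then carry the QP number); the short/word masks are the all-ones
 * big-endian masks used for exact-match L3/L4 fields in steering rules.
 */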
#define EN_ETHTOOL_QP_ATTACH (1ull << 63)
#define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff)
#define EN_ETHTOOL_WORD_MASK  cpu_to_be32(0xffffffff)

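/* Push the current interrupt moderation settings to every TX and RX CQ.
 * RX rings are left alone when adaptive RX coalescing is enabled, since the
 * auto-moderation logic reprograms them on its own.
 */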
static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
{
	int i;
	int err = 0;

	for (i = 0; i < priv->tx_ring_num; i++) {
		priv->tx_cq[i]->moder_cnt = priv->tx_frames;
		priv->tx_cq[i]->moder_time = priv->tx_usecs;
		if (priv->port_up) {
			err = mlx4_en_set_cq_moder(priv, priv->tx_cq[i]);
			if (err)
				return err;
		}
	}

	if (priv->adaptive_rx_coal)
		return 0;

	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_cq[i]->moder_cnt = priv->rx_frames;
		priv->rx_cq[i]->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		if (priv->port_up) {
			err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]);
			if (err)
				return err;
		}
	}

	return err;
}

static void
mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")",
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		"%d.%d.%d",
		(u16) (mdev->dev->caps.fw_ver >> 32),
		(u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
		(u16) (mdev->dev->caps.fw_ver & 0xffff));
	strlcpy(drvinfo->bus_info, pci_name(mdev->dev->pdev),
		sizeof(drvinfo->bus_info));
	drvinfo->n_stats = 0;
	drvinfo->regdump_len = 0;
	drvinfo->eedump_len = 0;
}

static const char main_strings[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",

	/* port statistics */
	"tso_packets",
	"queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
	"rx_csum_good", "rx_csum_none", "tx_chksum_offload",

	/* packet statistics */
	"broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3",
	"rx_prio_4", "rx_prio_5", "rx_prio_6", "rx_prio_7", "tx_prio_0",
	"tx_prio_1", "tx_prio_2", "tx_prio_3", "tx_prio_4", "tx_prio_5",
	"tx_prio_6", "tx_prio_7",
};
#define NUM_MAIN_STATS	21
#define NUM_ALL_STATS	(NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)

static const char mlx4_en_test_names[][ETH_GSTRING_LEN] = {
	"Interrupt Test",
	"Link Test",
	"Speed Test",
	"Register Test",
	"Loopback Test",
};

static u32 mlx4_en_get_msglevel(struct net_device *dev)
{
	return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
}

static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
{
	((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val;
}

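/* Report Wake-on-LAN capability and state for this port.  Only magic packet
 * wake-up is supported, and only when the device capability flag for the
 * port is set; the current setting is read from the firmware WoL config.
 */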
static void mlx4_en_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);
	int err = 0;
	u64 config = 0;
	u64 mask;

	if ((priv->port < 1) || (priv->port > 2)) {
		en_err(priv, "Failed to get WoL information\n");
		return;
	}

	mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
		MLX4_DEV_CAP_FLAG_WOL_PORT2;

	if (!(priv->mdev->dev->caps.flags & mask)) {
		wol->supported = 0;
		wol->wolopts = 0;
		return;
	}

	err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
	if (err) {
		en_err(priv, "Failed to get WoL information\n");
		return;
	}

	if (config & MLX4_EN_WOL_MAGIC)
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;

	if (config & MLX4_EN_WOL_ENABLED)
		wol->wolopts = WAKE_MAGIC;
	else
		wol->wolopts = 0;
}

static int mlx4_en_set_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);
	u64 config = 0;
	int err = 0;
	u64 mask;

	if ((priv->port < 1) || (priv->port > 2))
		return -EOPNOTSUPP;

	mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
		MLX4_DEV_CAP_FLAG_WOL_PORT2;

	if (!(priv->mdev->dev->caps.flags & mask))
		return -EOPNOTSUPP;

	if (wol->supported & ~WAKE_MAGIC)
		return -EINVAL;

	err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
	if (err) {
		en_err(priv, "Failed to get WoL info, unable to modify\n");
		return err;
	}

	if (wol->wolopts & WAKE_MAGIC) {
		config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED |
				MLX4_EN_WOL_MAGIC;
	} else {
		config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC);
		config |= MLX4_EN_WOL_DO_MODIFY;
	}

	err = mlx4_wol_write(priv->mdev->dev, config, priv->port);
	if (err)
		en_err(priv, "Failed to set WoL information\n");

	return err;
}

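/* Number of strings exported per string set.  For ETH_SS_STATS this depends
 * on the stats bitmap (when set, only the selected counters are exported),
 * plus two counters per TX ring and two (or five with busy-poll) per RX ring.
 * For ETH_SS_TEST two tests are dropped when unicast loopback is unsupported.
 */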
static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int bit_count = hweight64(priv->stats_bitmap);

	switch (sset) {
	case ETH_SS_STATS:
		return (priv->stats_bitmap ? bit_count : NUM_ALL_STATS) +
			(priv->tx_ring_num * 2) +
#ifdef CONFIG_NET_RX_BUSY_POLL
			(priv->rx_ring_num * 5);
#else
			(priv->rx_ring_num * 2);
#endif
	case ETH_SS_TEST:
		return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
					& MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
	default:
		return -EOPNOTSUPP;
	}
}

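/* Copy the counters into the ethtool buffer in the same order that
 * mlx4_en_get_strings() lays out the names: software/port/packet stats
 * first (filtered by the stats bitmap when it is set), then per-ring
 * packet and byte counts, all under the stats lock.
 */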
static void mlx4_en_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, uint64_t *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int index = 0;
	int i, j = 0;

	spin_lock_bh(&priv->stats_lock);

	if (!(priv->stats_bitmap)) {
		for (i = 0; i < NUM_MAIN_STATS; i++)
			data[index++] =
				((unsigned long *) &priv->stats)[i];
		for (i = 0; i < NUM_PORT_STATS; i++)
			data[index++] =
				((unsigned long *) &priv->port_stats)[i];
		for (i = 0; i < NUM_PKT_STATS; i++)
			data[index++] =
				((unsigned long *) &priv->pkstats)[i];
	} else {
		for (i = 0; i < NUM_MAIN_STATS; i++) {
			if ((priv->stats_bitmap >> j) & 1)
				data[index++] =
				((unsigned long *) &priv->stats)[i];
			j++;
		}
		for (i = 0; i < NUM_PORT_STATS; i++) {
			if ((priv->stats_bitmap >> j) & 1)
				data[index++] =
				((unsigned long *) &priv->port_stats)[i];
			j++;
		}
	}
	for (i = 0; i < priv->tx_ring_num; i++) {
		data[index++] = priv->tx_ring[i]->packets;
		data[index++] = priv->tx_ring[i]->bytes;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		data[index++] = priv->rx_ring[i]->packets;
		data[index++] = priv->rx_ring[i]->bytes;
#ifdef CONFIG_NET_RX_BUSY_POLL
		data[index++] = priv->rx_ring[i]->yields;
		data[index++] = priv->rx_ring[i]->misses;
		data[index++] = priv->rx_ring[i]->cleaned;
#endif
	}
	spin_unlock_bh(&priv->stats_lock);
}

static void mlx4_en_self_test(struct net_device *dev,
			      struct ethtool_test *etest, u64 *buf)
{
	mlx4_en_ex_selftest(dev, &etest->flags, buf);
}

static void mlx4_en_get_strings(struct net_device *dev,
				uint32_t stringset, uint8_t *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int index = 0;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
			strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
		if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK)
			for (; i < MLX4_EN_NUM_SELF_TEST; i++)
				strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
		break;

	case ETH_SS_STATS:
		/* Add main counters */
		if (!priv->stats_bitmap) {
			for (i = 0; i < NUM_MAIN_STATS; i++)
				strcpy(data + (index++) * ETH_GSTRING_LEN,
					main_strings[i]);
			for (i = 0; i < NUM_PORT_STATS; i++)
				strcpy(data + (index++) * ETH_GSTRING_LEN,
					main_strings[i +
					NUM_MAIN_STATS]);
			for (i = 0; i < NUM_PKT_STATS; i++)
				strcpy(data + (index++) * ETH_GSTRING_LEN,
					main_strings[i +
					NUM_MAIN_STATS +
					NUM_PORT_STATS]);
		} else
			for (i = 0; i < NUM_MAIN_STATS + NUM_PORT_STATS; i++) {
				if ((priv->stats_bitmap >> i) & 1) {
					strcpy(data +
					       (index++) * ETH_GSTRING_LEN,
					       main_strings[i]);
				}
				if (!(priv->stats_bitmap >> i))
					break;
			}
		for (i = 0; i < priv->tx_ring_num; i++) {
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"tx%d_packets", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"tx%d_bytes", i);
		}
		for (i = 0; i < priv->rx_ring_num; i++) {
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_packets", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_bytes", i);
#ifdef CONFIG_NET_RX_BUSY_POLL
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_napi_yield", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_misses", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_cleaned", i);
#endif
		}
		break;
	}
}

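/* Report link settings.  The device only runs 10GbE full duplex with
 * autonegotiation disabled; the port/transceiver type is derived from the
 * transceiver code returned by QUERY_PORT, and speed/duplex are reported as
 * unknown while the carrier is down.
 */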
static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int trans_type;

	cmd->autoneg = AUTONEG_DISABLE;
	cmd->supported = SUPPORTED_10000baseT_Full;
	cmd->advertising = ADVERTISED_10000baseT_Full;

	if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
		return -ENOMEM;

	trans_type = priv->port_state.transciver;
	if (netif_carrier_ok(dev)) {
		ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
		cmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
	}

	if (trans_type > 0 && trans_type <= 0xC) {
		cmd->port = PORT_FIBRE;
		cmd->transceiver = XCVR_EXTERNAL;
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->advertising |= ADVERTISED_FIBRE;
	} else if (trans_type == 0x80 || trans_type == 0) {
		cmd->port = PORT_TP;
		cmd->transceiver = XCVR_INTERNAL;
		cmd->supported |= SUPPORTED_TP;
		cmd->advertising |= ADVERTISED_TP;
	} else {
		cmd->port = -1;
		cmd->transceiver = -1;
	}
	return 0;
}

static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	if ((cmd->autoneg == AUTONEG_ENABLE) ||
	    (ethtool_cmd_speed(cmd) != SPEED_10000) ||
	    (cmd->duplex != DUPLEX_FULL))
		return -EINVAL;

	/* Nothing to change */
	return 0;
}

static int mlx4_en_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	coal->tx_coalesce_usecs = priv->tx_usecs;
	coal->tx_max_coalesced_frames = priv->tx_frames;
	coal->rx_coalesce_usecs = priv->rx_usecs;
	coal->rx_max_coalesced_frames = priv->rx_frames;

	coal->pkt_rate_low = priv->pkt_rate_low;
	coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
	coal->pkt_rate_high = priv->pkt_rate_high;
	coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
	coal->rate_sample_interval = priv->sample_interval;
	coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
	return 0;
}

static int mlx4_en_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	priv->rx_frames = (coal->rx_max_coalesced_frames ==
			   MLX4_EN_AUTO_CONF) ?
				MLX4_EN_RX_COAL_TARGET :
				coal->rx_max_coalesced_frames;
	priv->rx_usecs = (coal->rx_coalesce_usecs ==
			  MLX4_EN_AUTO_CONF) ?
				MLX4_EN_RX_COAL_TIME :
				coal->rx_coalesce_usecs;

	/* Setting TX coalescing parameters */
	if (coal->tx_coalesce_usecs != priv->tx_usecs ||
	    coal->tx_max_coalesced_frames != priv->tx_frames) {
		priv->tx_usecs = coal->tx_coalesce_usecs;
		priv->tx_frames = coal->tx_max_coalesced_frames;
	}

	/* Set adaptive coalescing params */
	priv->pkt_rate_low = coal->pkt_rate_low;
	priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
	priv->pkt_rate_high = coal->pkt_rate_high;
	priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
	priv->sample_interval = coal->rate_sample_interval;
	priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;

	return mlx4_en_moderation_update(priv);
}

static int mlx4_en_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *pause)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	priv->prof->tx_pause = pause->tx_pause != 0;
	priv->prof->rx_pause = pause->rx_pause != 0;
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err)
		en_err(priv, "Failed setting pause params\n");

	return err;
}

static void mlx4_en_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *pause)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	pause->tx_pause = priv->prof->tx_pause;
	pause->rx_pause = priv->prof->rx_pause;
}

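/* Resize the RX/TX rings.  The requested sizes are rounded up to a power of
 * two and clamped to the supported range; applying them requires stopping
 * the port, reallocating the port resources and restarting it if it was up.
 */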
static int mlx4_en_set_ringparam(struct net_device *dev,
				 struct ethtool_ringparam *param)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	u32 rx_size, tx_size;
	int port_up = 0;
	int err = 0;

	if (param->rx_jumbo_pending || param->rx_mini_pending)
		return -EINVAL;

	rx_size = roundup_pow_of_two(param->rx_pending);
	rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
	rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
	tx_size = roundup_pow_of_two(param->tx_pending);
	tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
	tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);

	if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
					priv->rx_ring[0]->size) &&
	    tx_size == priv->tx_ring[0]->size)
		return 0;

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_free_resources(priv);

	priv->prof->tx_ring_size = tx_size;
	priv->prof->rx_ring_size = rx_size;

	err = mlx4_en_alloc_resources(priv);
	if (err) {
		en_err(priv, "Failed reallocating port resources\n");
		goto out;
	}
	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

	err = mlx4_en_moderation_update(priv);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}

static void mlx4_en_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *param)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	memset(param, 0, sizeof(*param));
	param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
	param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
	param->rx_pending = priv->port_up ?
		priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size;
	param->tx_pending = priv->tx_ring[0]->size;
}

static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	return priv->rx_ring_num;
}

static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	int rss_rings;
	size_t n = priv->rx_ring_num;
	int err = 0;

	rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num;

	while (n--) {
		ring_index[n] = rss_map->qps[n % rss_rings].qpn -
			rss_map->base_qpn;
	}

	return err;
}

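/* Program a new RSS indirection table.  The hardware only supports an even,
 * sequential spread over the first rss_rings rings, so the table is accepted
 * only if entry i equals i modulo rss_rings and rss_rings is a power of two;
 * applying the change restarts the port if it was up.
 */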
static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
			    const u8 *key)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int port_up = 0;
	int err = 0;
	int i;
	int rss_rings = 0;

	/* Calculate RSS table size and make sure flows are spread evenly
	 * between rings
	 */
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (i > 0 && !ring_index[i] && !rss_rings)
			rss_rings = i;

		if (ring_index[i] != (i % (rss_rings ?: priv->rx_ring_num)))
			return -EINVAL;
	}

	if (!rss_rings)
		rss_rings = priv->rx_ring_num;

	/* RSS table size must be a power of 2 */
	if (!is_power_of_2(rss_rings))
		return -EINVAL;

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	priv->prof->rss_rings = rss_rings;

	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

	mutex_unlock(&mdev->state_lock);
	return err;
}

#define all_zeros_or_all_ones(field)		\
	((field) == 0 || (field) == (__force typeof(field))-1)

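/* Sanity-check an ethtool flow specification before it is translated into a
 * hardware steering rule: the location must be in range, field masks must be
 * all-zeros or all-ones, and only TCP/UDP/IP/Ethernet IPv4 flow types with an
 * optional exact-match VLAN ID are accepted.
 */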
static int mlx4_en_validate_flow(struct net_device *dev,
				 struct ethtool_rxnfc *cmd)
{
	struct ethtool_usrip4_spec *l3_mask;
	struct ethtool_tcpip4_spec *l4_mask;
	struct ethhdr *eth_mask;

	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
		return -EINVAL;

	if (cmd->fs.flow_type & FLOW_MAC_EXT) {
		/* dest mac mask must be ff:ff:ff:ff:ff:ff */
		if (!is_broadcast_ether_addr(cmd->fs.m_ext.h_dest))
			return -EINVAL;
	}

	switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		if (cmd->fs.m_u.tcp_ip4_spec.tos)
			return -EINVAL;
		l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
		/* don't allow mask which isn't all 0 or 1 */
		if (!all_zeros_or_all_ones(l4_mask->ip4src) ||
		    !all_zeros_or_all_ones(l4_mask->ip4dst) ||
		    !all_zeros_or_all_ones(l4_mask->psrc) ||
		    !all_zeros_or_all_ones(l4_mask->pdst))
			return -EINVAL;
		break;
	case IP_USER_FLOW:
		l3_mask = &cmd->fs.m_u.usr_ip4_spec;
		if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
		    cmd->fs.h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 ||
		    (!l3_mask->ip4src && !l3_mask->ip4dst) ||
		    !all_zeros_or_all_ones(l3_mask->ip4src) ||
		    !all_zeros_or_all_ones(l3_mask->ip4dst))
			return -EINVAL;
		break;
	case ETHER_FLOW:
		eth_mask = &cmd->fs.m_u.ether_spec;
		/* source mac mask must not be set */
		if (!is_zero_ether_addr(eth_mask->h_source))
			return -EINVAL;

		/* dest mac mask must be ff:ff:ff:ff:ff:ff */
		if (!is_broadcast_ether_addr(eth_mask->h_dest))
			return -EINVAL;

		if (!all_zeros_or_all_ones(eth_mask->h_proto))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if ((cmd->fs.flow_type & FLOW_EXT)) {
		if (cmd->fs.m_ext.vlan_etype ||
		    !((cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
		      0 ||
		      (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
		      cpu_to_be16(VLAN_VID_MASK)))
			return -EINVAL;

		if (cmd->fs.m_ext.vlan_tci) {
			if (be16_to_cpu(cmd->fs.h_ext.vlan_tci) >= VLAN_N_VID)
				return -EINVAL;
		}
	}

	return 0;
}

static int mlx4_en_ethtool_add_mac_rule(struct ethtool_rxnfc *cmd,
					struct list_head *rule_list_h,
					struct mlx4_spec_list *spec_l2,
					unsigned char *mac)
{
	int err = 0;
	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);

	spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
	memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
	memcpy(spec_l2->eth.dst_mac, mac, ETH_ALEN);

	if ((cmd->fs.flow_type & FLOW_EXT) &&
	    (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
		spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
		spec_l2->eth.vlan_id_msk = cpu_to_be16(VLAN_VID_MASK);
	}

	list_add_tail(&spec_l2->list, rule_list_h);

	return err;
}

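/* Build the L2 part of a steering rule for an IPv4 flow: multicast
 * destinations are mapped to their multicast MAC, otherwise the rule matches
 * the user-supplied destination MAC (FLOW_MAC_EXT) or the port's own address.
 */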
static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv,
						struct ethtool_rxnfc *cmd,
						struct list_head *rule_list_h,
						struct mlx4_spec_list *spec_l2,
						__be32 ipv4_dst)
{
#ifdef CONFIG_INET
	unsigned char mac[ETH_ALEN];

	if (!ipv4_is_multicast(ipv4_dst)) {
		if (cmd->fs.flow_type & FLOW_MAC_EXT)
			memcpy(&mac, cmd->fs.h_ext.h_dest, ETH_ALEN);
		else
			memcpy(&mac, priv->dev->dev_addr, ETH_ALEN);
	} else {
		ip_eth_mc_map(ipv4_dst, mac);
	}

	return mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2, &mac[0]);
#else
	return -EINVAL;
#endif
}

static int add_ip_rule(struct mlx4_en_priv *priv,
		       struct ethtool_rxnfc *cmd,
		       struct list_head *list_h)
{
	int err;
	struct mlx4_spec_list *spec_l2 = NULL;
	struct mlx4_spec_list *spec_l3 = NULL;
	struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec;

	spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
	spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
	if (!spec_l2 || !spec_l3) {
		err = -ENOMEM;
		goto free_spec;
	}

	err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h, spec_l2,
						   cmd->fs.h_u.
						   usr_ip4_spec.ip4dst);
	if (err)
		goto free_spec;
	spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
	spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src;
	if (l3_mask->ip4src)
		spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
	spec_l3->ipv4.dst_ip = cmd->fs.h_u.usr_ip4_spec.ip4dst;
	if (l3_mask->ip4dst)
		spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;
	list_add_tail(&spec_l3->list, list_h);

	return 0;

free_spec:
	kfree(spec_l2);
	kfree(spec_l3);
	return err;
}

static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
			     struct ethtool_rxnfc *cmd,
			     struct list_head *list_h, int proto)
{
	int err;
	struct mlx4_spec_list *spec_l2 = NULL;
	struct mlx4_spec_list *spec_l3 = NULL;
	struct mlx4_spec_list *spec_l4 = NULL;
	struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec;

	spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
	spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
	spec_l4 = kzalloc(sizeof(*spec_l4), GFP_KERNEL);
	if (!spec_l2 || !spec_l3 || !spec_l4) {
		err = -ENOMEM;
		goto free_spec;
	}

	spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;

	if (proto == TCP_V4_FLOW) {
		err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
							   spec_l2,
							   cmd->fs.h_u.
							   tcp_ip4_spec.ip4dst);
		if (err)
			goto free_spec;
		spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
		spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src;
		spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst;
		spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc;
		spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst;
	} else {
		err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
							   spec_l2,
							   cmd->fs.h_u.
							   udp_ip4_spec.ip4dst);
		if (err)
			goto free_spec;
		spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
		spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src;
		spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst;
		spec_l4->tcp_udp.src_port = cmd->fs.h_u.udp_ip4_spec.psrc;
		spec_l4->tcp_udp.dst_port = cmd->fs.h_u.udp_ip4_spec.pdst;
	}

	if (l4_mask->ip4src)
		spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
	if (l4_mask->ip4dst)
		spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;

	if (l4_mask->psrc)
		spec_l4->tcp_udp.src_port_msk = EN_ETHTOOL_SHORT_MASK;
	if (l4_mask->pdst)
		spec_l4->tcp_udp.dst_port_msk = EN_ETHTOOL_SHORT_MASK;

	list_add_tail(&spec_l3->list, list_h);
	list_add_tail(&spec_l4->list, list_h);

	return 0;

free_spec:
	kfree(spec_l2);
	kfree(spec_l3);
	kfree(spec_l4);
	return err;
}

static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
					     struct ethtool_rxnfc *cmd,
					     struct list_head *rule_list_h)
{
	int err;
	struct ethhdr *eth_spec;
	struct mlx4_spec_list *spec_l2;
	struct mlx4_en_priv *priv = netdev_priv(dev);

	err = mlx4_en_validate_flow(dev, cmd);
	if (err)
		return err;

	switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case ETHER_FLOW:
		spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
		if (!spec_l2)
			return -ENOMEM;

		eth_spec = &cmd->fs.h_u.ether_spec;
		mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2,
					     &eth_spec->h_dest[0]);
		spec_l2->eth.ether_type = eth_spec->h_proto;
		if (eth_spec->h_proto)
			spec_l2->eth.ether_type_enable = 1;
		break;
	case IP_USER_FLOW:
		err = add_ip_rule(priv, cmd, rule_list_h);
		break;
	case TCP_V4_FLOW:
		err = add_tcp_udp_rule(priv, cmd, rule_list_h, TCP_V4_FLOW);
		break;
	case UDP_V4_FLOW:
		err = add_tcp_udp_rule(priv, cmd, rule_list_h, UDP_V4_FLOW);
		break;
	}

	return err;
}

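/* Insert or replace the steering rule at cmd->fs.location.  The ring_cookie
 * selects the destination: the drop QP for RX_CLS_FLOW_DISC, a raw QP number
 * when the EN_ETHTOOL_QP_ATTACH bit is set, or an existing RX ring otherwise.
 * Any rule already attached at this location is detached first.
 */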
static int mlx4_en_flow_replace(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	int err;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct ethtool_flow_id *loc_rule;
	struct mlx4_spec_list *spec, *tmp_spec;
	u32 qpn;
	u64 reg_id;

	struct mlx4_net_trans_rule rule = {
		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
		.exclusive = 0,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
	};

	rule.port = priv->port;
	rule.priority = MLX4_DOMAIN_ETHTOOL | cmd->fs.location;
	INIT_LIST_HEAD(&rule.list);

	/* Allow direct QP attaches if the EN_ETHTOOL_QP_ATTACH flag is set */
	if (cmd->fs.ring_cookie == RX_CLS_FLOW_DISC)
		qpn = priv->drop_qp.qpn;
	else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
		qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
	} else {
		if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
			en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
				cmd->fs.ring_cookie);
			return -EINVAL;
		}
		qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
		if (!qpn) {
			en_warn(priv, "rxnfc: RX ring (%llu) is inactive\n",
				cmd->fs.ring_cookie);
			return -EINVAL;
		}
	}
	rule.qpn = qpn;
	err = mlx4_en_ethtool_to_net_trans_rule(dev, cmd, &rule.list);
	if (err)
		goto out_free_list;

	loc_rule = &priv->ethtool_rules[cmd->fs.location];
	if (loc_rule->id) {
		err = mlx4_flow_detach(priv->mdev->dev, loc_rule->id);
		if (err) {
			en_err(priv, "Fail to detach network rule at location %d. registration id = %llx\n",
			       cmd->fs.location, loc_rule->id);
			goto out_free_list;
		}
		loc_rule->id = 0;
		memset(&loc_rule->flow_spec, 0,
		       sizeof(struct ethtool_rx_flow_spec));
		list_del(&loc_rule->list);
	}
	err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
	if (err) {
		en_err(priv, "Fail to attach network rule at location %d\n",
		       cmd->fs.location);
		goto out_free_list;
	}
	loc_rule->id = reg_id;
	memcpy(&loc_rule->flow_spec, &cmd->fs,
	       sizeof(struct ethtool_rx_flow_spec));
	list_add_tail(&loc_rule->list, &priv->ethtool_list);

out_free_list:
	list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
		list_del(&spec->list);
		kfree(spec);
	}
	return err;
}

static int mlx4_en_flow_detach(struct net_device *dev,
			       struct ethtool_rxnfc *cmd)
{
	int err = 0;
	struct ethtool_flow_id *rule;
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
		return -EINVAL;

	rule = &priv->ethtool_rules[cmd->fs.location];
	if (!rule->id) {
		err = -ENOENT;
		goto out;
	}

	err = mlx4_flow_detach(priv->mdev->dev, rule->id);
	if (err) {
		en_err(priv, "Fail to detach network rule at location %d. registration id = 0x%llx\n",
		       cmd->fs.location, rule->id);
		goto out;
	}
	rule->id = 0;
	memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec));
	list_del(&rule->list);
out:
	return err;
}

static int mlx4_en_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
			    int loc)
{
	int err = 0;
	struct ethtool_flow_id *rule;
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
		return -EINVAL;

	rule = &priv->ethtool_rules[loc];
	if (rule->id)
		memcpy(&cmd->fs, &rule->flow_spec,
		       sizeof(struct ethtool_rx_flow_spec));
	else
		err = -ENOENT;

	return err;
}

static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv)
{
	int i, res = 0;

	for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
		if (priv->ethtool_rules[i].id)
			res++;
	}
	return res;
}

static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			     u32 *rule_locs)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;
	int i = 0, priority = 0;

	if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT ||
	     cmd->cmd == ETHTOOL_GRXCLSRULE ||
	     cmd->cmd == ETHTOOL_GRXCLSRLALL) &&
	    (mdev->dev->caps.steering_mode !=
	     MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up))
		return -EINVAL;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = priv->rx_ring_num;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = mlx4_en_get_num_flows(priv);
		break;
	case ETHTOOL_GRXCLSRULE:
		err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
		break;
	case ETHTOOL_GRXCLSRLALL:
		while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
			err = mlx4_en_get_flow(dev, cmd, i);
			if (!err)
				rule_locs[priority++] = i;
			i++;
		}
		err = 0;
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	int err = 0;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	if (mdev->dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up)
		return -EINVAL;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		err = mlx4_en_flow_replace(dev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		err = mlx4_en_flow_detach(dev, cmd);
		break;
	default:
		en_warn(priv, "Unsupported ethtool command. (%d)\n", cmd->cmd);
		return -EINVAL;
	}

	return err;
}

static void mlx4_en_get_channels(struct net_device *dev,
				 struct ethtool_channels *channel)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	memset(channel, 0, sizeof(*channel));

	channel->max_rx = MAX_RX_RINGS;
	channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;

	channel->rx_count = priv->rx_ring_num;
	channel->tx_count = priv->tx_ring_num / MLX4_EN_NUM_UP;
}

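/* Change the number of RX rings and TX rings per user priority.  The port is
 * stopped, resources are reallocated for the new ring counts, the real queue
 * counts and TC configuration are refreshed, and the port is restarted if it
 * was running.
 */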
static int mlx4_en_set_channels(struct net_device *dev,
				struct ethtool_channels *channel)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int port_up = 0;
	int err = 0;

	if (channel->other_count || channel->combined_count ||
	    channel->tx_count > MLX4_EN_MAX_TX_RING_P_UP ||
	    channel->rx_count > MAX_RX_RINGS ||
	    !channel->tx_count || !channel->rx_count)
		return -EINVAL;

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_free_resources(priv);

	priv->num_tx_rings_p_up = channel->tx_count;
	priv->tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
	priv->rx_ring_num = channel->rx_count;

	err = mlx4_en_alloc_resources(priv);
	if (err) {
		en_err(priv, "Failed reallocating port resources\n");
		goto out;
	}

	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

	if (dev->num_tc)
		mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);

	en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num);
	en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);

	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

	err = mlx4_en_moderation_update(priv);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}

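/* Report timestamping capabilities.  On top of the software capabilities
 * reported by ethtool_op_get_ts_info(), hardware TX/RX timestamping and the
 * PTP clock index are advertised when the device supports timestamping.
 */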
static int mlx4_en_get_ts_info(struct net_device *dev,
			       struct ethtool_ts_info *info)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int ret;

	ret = ethtool_op_get_ts_info(dev, info);
	if (ret)
		return ret;

	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
		info->so_timestamping |=
			SOF_TIMESTAMPING_TX_HARDWARE |
			SOF_TIMESTAMPING_RX_HARDWARE |
			SOF_TIMESTAMPING_RAW_HARDWARE;

		info->tx_types =
			(1 << HWTSTAMP_TX_OFF) |
			(1 << HWTSTAMP_TX_ON);

		info->rx_filters =
			(1 << HWTSTAMP_FILTER_NONE) |
			(1 << HWTSTAMP_FILTER_ALL);

		if (mdev->ptp_clock)
			info->phc_index = ptp_clock_index(mdev->ptp_clock);
	}

	return ret;
}

const struct ethtool_ops mlx4_en_ethtool_ops = {
	.get_drvinfo = mlx4_en_get_drvinfo,
	.get_settings = mlx4_en_get_settings,
	.set_settings = mlx4_en_set_settings,
	.get_link = ethtool_op_get_link,
	.get_strings = mlx4_en_get_strings,
	.get_sset_count = mlx4_en_get_sset_count,
	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
	.self_test = mlx4_en_self_test,
	.get_wol = mlx4_en_get_wol,
	.set_wol = mlx4_en_set_wol,
	.get_msglevel = mlx4_en_get_msglevel,
	.set_msglevel = mlx4_en_set_msglevel,
	.get_coalesce = mlx4_en_get_coalesce,
	.set_coalesce = mlx4_en_set_coalesce,
	.get_pauseparam = mlx4_en_get_pauseparam,
	.set_pauseparam = mlx4_en_set_pauseparam,
	.get_ringparam = mlx4_en_get_ringparam,
	.set_ringparam = mlx4_en_set_ringparam,
	.get_rxnfc = mlx4_en_get_rxnfc,
	.set_rxnfc = mlx4_en_set_rxnfc,
	.get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
	.get_rxfh = mlx4_en_get_rxfh,
	.set_rxfh = mlx4_en_set_rxfh,
	.get_channels = mlx4_en_get_channels,
	.set_channels = mlx4_en_set_channels,
	.get_ts_info = mlx4_en_get_ts_info,
};