1/*
2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation.  The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18#include <linux/prefetch.h>
19#include <linux/module.h>
20#include "be.h"
21#include "be_cmds.h"
22#include <asm/div64.h>
23
24MODULE_VERSION(DRV_VER);
25MODULE_DEVICE_TABLE(pci, be_dev_ids);
26MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27MODULE_AUTHOR("ServerEngines Corporation");
28MODULE_LICENSE("GPL");
29
30static unsigned int num_vfs;
31module_param(num_vfs, uint, S_IRUGO);
32MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
33
34static ushort rx_frag_size = 2048;
35module_param(rx_frag_size, ushort, S_IRUGO);
36MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
38static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
39	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
40	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
41	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
42	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
43	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
44	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
45	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
46	{ 0 }
47};
48MODULE_DEVICE_TABLE(pci, be_dev_ids);
49/* UE Status Low CSR */
50static const char * const ue_status_low_desc[] = {
51	"CEV",
52	"CTX",
53	"DBUF",
54	"ERX",
55	"Host",
56	"MPU",
57	"NDMA",
58	"PTC ",
59	"RDMA ",
60	"RXF ",
61	"RXIPS ",
62	"RXULP0 ",
63	"RXULP1 ",
64	"RXULP2 ",
65	"TIM ",
66	"TPOST ",
67	"TPRE ",
68	"TXIPS ",
69	"TXULP0 ",
70	"TXULP1 ",
71	"UC ",
72	"WDMA ",
73	"TXULP2 ",
74	"HOST1 ",
75	"P0_OB_LINK ",
76	"P1_OB_LINK ",
77	"HOST_GPIO ",
78	"MBOX ",
79	"AXGMAC0",
80	"AXGMAC1",
81	"JTAG",
82	"MPU_INTPEND"
83};
84/* UE Status High CSR */
85static const char * const ue_status_hi_desc[] = {
86	"LPCMEMHOST",
87	"MGMT_MAC",
88	"PCS0ONLINE",
89	"MPU_IRAM",
90	"PCS1ONLINE",
91	"PCTL0",
92	"PCTL1",
93	"PMEM",
94	"RR",
95	"TXPB",
96	"RXPP",
97	"XAUI",
98	"TXP",
99	"ARM",
100	"IPC",
101	"HOST2",
102	"HOST3",
103	"HOST4",
104	"HOST5",
105	"HOST6",
106	"HOST7",
107	"HOST8",
108	"HOST9",
109	"NETC",
110	"Unknown",
111	"Unknown",
112	"Unknown",
113	"Unknown",
114	"Unknown",
115	"Unknown",
116	"Unknown",
117	"Unknown"
118};
119
120/* Is BE in a multi-channel mode */
121static inline bool be_is_mc(struct be_adapter *adapter) {
122	return (adapter->function_mode & FLEX10_MODE ||
123		adapter->function_mode & VNIC_MODE ||
124		adapter->function_mode & UMC_ENABLED);
125}
126
127static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128{
129	struct be_dma_mem *mem = &q->dma_mem;
130	if (mem->va) {
131		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132				  mem->dma);
133		mem->va = NULL;
134	}
135}
136
137static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
138		u16 len, u16 entry_size)
139{
140	struct be_dma_mem *mem = &q->dma_mem;
141
142	memset(q, 0, sizeof(*q));
143	q->len = len;
144	q->entry_size = entry_size;
145	mem->size = len * entry_size;
146	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147				     GFP_KERNEL);
148	if (!mem->va)
149		return -ENOMEM;
150	memset(mem->va, 0, mem->size);
151	return 0;
152}
153
154static void be_intr_set(struct be_adapter *adapter, bool enable)
155{
156	u32 reg, enabled;
157
158	if (adapter->eeh_err)
159		return;
160
161	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
162				&reg);
163	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
164
165	if (!enabled && enable)
166		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
167	else if (enabled && !enable)
168		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
169	else
170		return;
171
172	pci_write_config_dword(adapter->pdev,
173			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
174}
175
176static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
177{
178	u32 val = 0;
179	val |= qid & DB_RQ_RING_ID_MASK;
180	val |= posted << DB_RQ_NUM_POSTED_SHIFT;
181
182	wmb();
183	iowrite32(val, adapter->db + DB_RQ_OFFSET);
184}
185
186static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
187{
188	u32 val = 0;
189	val |= qid & DB_TXULP_RING_ID_MASK;
190	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
191
192	wmb();
193	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
194}
195
196static void be_eq_notify(struct be_adapter *adapter, u16 qid,
197		bool arm, bool clear_int, u16 num_popped)
198{
199	u32 val = 0;
200	val |= qid & DB_EQ_RING_ID_MASK;
201	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
202			DB_EQ_RING_ID_EXT_MASK_SHIFT);
203
204	if (adapter->eeh_err)
205		return;
206
207	if (arm)
208		val |= 1 << DB_EQ_REARM_SHIFT;
209	if (clear_int)
210		val |= 1 << DB_EQ_CLR_SHIFT;
211	val |= 1 << DB_EQ_EVNT_SHIFT;
212	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
213	iowrite32(val, adapter->db + DB_EQ_OFFSET);
214}
215
216void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
217{
218	u32 val = 0;
219	val |= qid & DB_CQ_RING_ID_MASK;
220	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
221			DB_CQ_RING_ID_EXT_MASK_SHIFT);
222
223	if (adapter->eeh_err)
224		return;
225
226	if (arm)
227		val |= 1 << DB_CQ_REARM_SHIFT;
228	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
229	iowrite32(val, adapter->db + DB_CQ_OFFSET);
230}
231
232static int be_mac_addr_set(struct net_device *netdev, void *p)
233{
234	struct be_adapter *adapter = netdev_priv(netdev);
235	struct sockaddr *addr = p;
236	int status = 0;
237	u8 current_mac[ETH_ALEN];
238	u32 pmac_id = adapter->pmac_id[0];
239
240	if (!is_valid_ether_addr(addr->sa_data))
241		return -EADDRNOTAVAIL;
242
243	status = be_cmd_mac_addr_query(adapter, current_mac,
244				MAC_ADDRESS_TYPE_NETWORK, false,
245				adapter->if_handle, 0);
246	if (status)
247		goto err;
248
249	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
250		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
251				adapter->if_handle, &adapter->pmac_id[0], 0);
252		if (status)
253			goto err;
254
255		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
256	}
257	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
258	return 0;
259err:
260	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
261	return status;
262}
263
264static void populate_be2_stats(struct be_adapter *adapter)
265{
266	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
267	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
268	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
269	struct be_port_rxf_stats_v0 *port_stats =
270					&rxf_stats->port[adapter->port_num];
271	struct be_drv_stats *drvs = &adapter->drv_stats;
272
273	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
274	drvs->rx_pause_frames = port_stats->rx_pause_frames;
275	drvs->rx_crc_errors = port_stats->rx_crc_errors;
276	drvs->rx_control_frames = port_stats->rx_control_frames;
277	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
278	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
279	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
280	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
281	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
282	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
283	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
284	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
285	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
286	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
287	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
288	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
289	drvs->rx_dropped_header_too_small =
290		port_stats->rx_dropped_header_too_small;
291	drvs->rx_address_mismatch_drops =
292					port_stats->rx_address_mismatch_drops +
293					port_stats->rx_vlan_mismatch_drops;
294	drvs->rx_alignment_symbol_errors =
295		port_stats->rx_alignment_symbol_errors;
296
297	drvs->tx_pauseframes = port_stats->tx_pauseframes;
298	drvs->tx_controlframes = port_stats->tx_controlframes;
299
300	if (adapter->port_num)
301		drvs->jabber_events = rxf_stats->port1_jabber_events;
302	else
303		drvs->jabber_events = rxf_stats->port0_jabber_events;
304	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
305	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
306	drvs->forwarded_packets = rxf_stats->forwarded_packets;
307	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
308	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
309	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
310	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
311}
312
313static void populate_be3_stats(struct be_adapter *adapter)
314{
315	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
316	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
317	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
318	struct be_port_rxf_stats_v1 *port_stats =
319					&rxf_stats->port[adapter->port_num];
320	struct be_drv_stats *drvs = &adapter->drv_stats;
321
322	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
323	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
324	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
325	drvs->rx_pause_frames = port_stats->rx_pause_frames;
326	drvs->rx_crc_errors = port_stats->rx_crc_errors;
327	drvs->rx_control_frames = port_stats->rx_control_frames;
328	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
329	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
330	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
331	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
332	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
333	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
334	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
335	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
336	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
337	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
338	drvs->rx_dropped_header_too_small =
339		port_stats->rx_dropped_header_too_small;
340	drvs->rx_input_fifo_overflow_drop =
341		port_stats->rx_input_fifo_overflow_drop;
342	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
343	drvs->rx_alignment_symbol_errors =
344		port_stats->rx_alignment_symbol_errors;
345	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
346	drvs->tx_pauseframes = port_stats->tx_pauseframes;
347	drvs->tx_controlframes = port_stats->tx_controlframes;
348	drvs->jabber_events = port_stats->jabber_events;
349	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
350	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
351	drvs->forwarded_packets = rxf_stats->forwarded_packets;
352	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
353	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
354	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
355	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
356}
357
358static void populate_lancer_stats(struct be_adapter *adapter)
359{
360
361	struct be_drv_stats *drvs = &adapter->drv_stats;
362	struct lancer_pport_stats *pport_stats =
363					pport_stats_from_cmd(adapter);
364
365	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
366	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
367	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
368	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
369	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
370	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
371	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
372	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
373	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
374	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
375	drvs->rx_dropped_tcp_length =
376				pport_stats->rx_dropped_invalid_tcp_length;
377	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
378	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
379	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
380	drvs->rx_dropped_header_too_small =
381				pport_stats->rx_dropped_header_too_small;
382	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
383	drvs->rx_address_mismatch_drops =
384					pport_stats->rx_address_mismatch_drops +
385					pport_stats->rx_vlan_mismatch_drops;
386	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
387	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
388	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
389	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
390	drvs->jabber_events = pport_stats->rx_jabbers;
391	drvs->forwarded_packets = pport_stats->num_forwards_lo;
392	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
393	drvs->rx_drops_too_many_frags =
394				pport_stats->rx_drops_too_many_frags_lo;
395}
396
397static void accumulate_16bit_val(u32 *acc, u16 val)
398{
399#define lo(x)			(x & 0xFFFF)
400#define hi(x)			(x & 0xFFFF0000)
401	bool wrapped = val < lo(*acc);
402	u32 newacc = hi(*acc) + val;
403
404	if (wrapped)
405		newacc += 65536;
406	ACCESS_ONCE(*acc) = newacc;
407}
408
409void be_parse_stats(struct be_adapter *adapter)
410{
411	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
412	struct be_rx_obj *rxo;
413	int i;
414
415	if (adapter->generation == BE_GEN3) {
416		if (lancer_chip(adapter))
417			populate_lancer_stats(adapter);
418		 else
419			populate_be3_stats(adapter);
420	} else {
421		populate_be2_stats(adapter);
422	}
423
424	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
425	for_all_rx_queues(adapter, rxo, i) {
426		/* below erx HW counter can actually wrap around after
427		 * 65535. Driver accumulates a 32-bit value
428		 */
429		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
430				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
431	}
432}
433
434static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
435					struct rtnl_link_stats64 *stats)
436{
437	struct be_adapter *adapter = netdev_priv(netdev);
438	struct be_drv_stats *drvs = &adapter->drv_stats;
439	struct be_rx_obj *rxo;
440	struct be_tx_obj *txo;
441	u64 pkts, bytes;
442	unsigned int start;
443	int i;
444
445	for_all_rx_queues(adapter, rxo, i) {
446		const struct be_rx_stats *rx_stats = rx_stats(rxo);
447		do {
448			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
449			pkts = rx_stats(rxo)->rx_pkts;
450			bytes = rx_stats(rxo)->rx_bytes;
451		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
452		stats->rx_packets += pkts;
453		stats->rx_bytes += bytes;
454		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
455		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
456					rx_stats(rxo)->rx_drops_no_frags;
457	}
458
459	for_all_tx_queues(adapter, txo, i) {
460		const struct be_tx_stats *tx_stats = tx_stats(txo);
461		do {
462			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
463			pkts = tx_stats(txo)->tx_pkts;
464			bytes = tx_stats(txo)->tx_bytes;
465		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
466		stats->tx_packets += pkts;
467		stats->tx_bytes += bytes;
468	}
469
470	/* bad pkts received */
471	stats->rx_errors = drvs->rx_crc_errors +
472		drvs->rx_alignment_symbol_errors +
473		drvs->rx_in_range_errors +
474		drvs->rx_out_range_errors +
475		drvs->rx_frame_too_long +
476		drvs->rx_dropped_too_small +
477		drvs->rx_dropped_too_short +
478		drvs->rx_dropped_header_too_small +
479		drvs->rx_dropped_tcp_length +
480		drvs->rx_dropped_runt;
481
482	/* detailed rx errors */
483	stats->rx_length_errors = drvs->rx_in_range_errors +
484		drvs->rx_out_range_errors +
485		drvs->rx_frame_too_long;
486
487	stats->rx_crc_errors = drvs->rx_crc_errors;
488
489	/* frame alignment errors */
490	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
491
492	/* receiver fifo overrun */
493	/* drops_no_pbuf is no per i/f, it's per BE card */
494	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
495				drvs->rx_input_fifo_overflow_drop +
496				drvs->rx_drops_no_pbuf;
497	return stats;
498}
499
500void be_link_status_update(struct be_adapter *adapter, u8 link_status)
501{
502	struct net_device *netdev = adapter->netdev;
503
504	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
505		netif_carrier_off(netdev);
506		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
507	}
508
509	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
510		netif_carrier_on(netdev);
511	else
512		netif_carrier_off(netdev);
513}
514
515static void be_tx_stats_update(struct be_tx_obj *txo,
516			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
517{
518	struct be_tx_stats *stats = tx_stats(txo);
519
520	u64_stats_update_begin(&stats->sync);
521	stats->tx_reqs++;
522	stats->tx_wrbs += wrb_cnt;
523	stats->tx_bytes += copied;
524	stats->tx_pkts += (gso_segs ? gso_segs : 1);
525	if (stopped)
526		stats->tx_stops++;
527	u64_stats_update_end(&stats->sync);
528}
529
530/* Determine number of WRB entries needed to xmit data in an skb */
531static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
532								bool *dummy)
533{
534	int cnt = (skb->len > skb->data_len);
535
536	cnt += skb_shinfo(skb)->nr_frags;
537
538	/* to account for hdr wrb */
539	cnt++;
540	if (lancer_chip(adapter) || !(cnt & 1)) {
541		*dummy = false;
542	} else {
543		/* add a dummy to make it an even num */
544		cnt++;
545		*dummy = true;
546	}
547	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
548	return cnt;
549}
550
551static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
552{
553	wrb->frag_pa_hi = upper_32_bits(addr);
554	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
555	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
556}
557
558static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
559					struct sk_buff *skb)
560{
561	u8 vlan_prio;
562	u16 vlan_tag;
563
564	vlan_tag = vlan_tx_tag_get(skb);
565	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
566	/* If vlan priority provided by OS is NOT in available bmap */
567	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
568		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
569				adapter->recommended_prio;
570
571	return vlan_tag;
572}
573
574static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
575		struct sk_buff *skb, u32 wrb_cnt, u32 len)
576{
577	u16 vlan_tag;
578
579	memset(hdr, 0, sizeof(*hdr));
580
581	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
582
583	if (skb_is_gso(skb)) {
584		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
585		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
586			hdr, skb_shinfo(skb)->gso_size);
587		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
588			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
589		if (lancer_chip(adapter) && adapter->sli_family  ==
590							LANCER_A0_SLI_FAMILY) {
591			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
592			if (is_tcp_pkt(skb))
593				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
594								tcpcs, hdr, 1);
595			else if (is_udp_pkt(skb))
596				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
597								udpcs, hdr, 1);
598		}
599	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
600		if (is_tcp_pkt(skb))
601			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
602		else if (is_udp_pkt(skb))
603			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
604	}
605
606	if (vlan_tx_tag_present(skb)) {
607		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
608		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
609		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
610	}
611
612	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
613	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
614	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
615	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
616}
617
618static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
619		bool unmap_single)
620{
621	dma_addr_t dma;
622
623	be_dws_le_to_cpu(wrb, sizeof(*wrb));
624
625	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
626	if (wrb->frag_len) {
627		if (unmap_single)
628			dma_unmap_single(dev, dma, wrb->frag_len,
629					 DMA_TO_DEVICE);
630		else
631			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
632	}
633}
634
635static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
636		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
637{
638	dma_addr_t busaddr;
639	int i, copied = 0;
640	struct device *dev = &adapter->pdev->dev;
641	struct sk_buff *first_skb = skb;
642	struct be_eth_wrb *wrb;
643	struct be_eth_hdr_wrb *hdr;
644	bool map_single = false;
645	u16 map_head;
646
647	hdr = queue_head_node(txq);
648	queue_head_inc(txq);
649	map_head = txq->head;
650
651	if (skb->len > skb->data_len) {
652		int len = skb_headlen(skb);
653		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
654		if (dma_mapping_error(dev, busaddr))
655			goto dma_err;
656		map_single = true;
657		wrb = queue_head_node(txq);
658		wrb_fill(wrb, busaddr, len);
659		be_dws_cpu_to_le(wrb, sizeof(*wrb));
660		queue_head_inc(txq);
661		copied += len;
662	}
663
664	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
665		const struct skb_frag_struct *frag =
666			&skb_shinfo(skb)->frags[i];
667		busaddr = skb_frag_dma_map(dev, frag, 0,
668					   skb_frag_size(frag), DMA_TO_DEVICE);
669		if (dma_mapping_error(dev, busaddr))
670			goto dma_err;
671		wrb = queue_head_node(txq);
672		wrb_fill(wrb, busaddr, skb_frag_size(frag));
673		be_dws_cpu_to_le(wrb, sizeof(*wrb));
674		queue_head_inc(txq);
675		copied += skb_frag_size(frag);
676	}
677
678	if (dummy_wrb) {
679		wrb = queue_head_node(txq);
680		wrb_fill(wrb, 0, 0);
681		be_dws_cpu_to_le(wrb, sizeof(*wrb));
682		queue_head_inc(txq);
683	}
684
685	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
686	be_dws_cpu_to_le(hdr, sizeof(*hdr));
687
688	return copied;
689dma_err:
690	txq->head = map_head;
691	while (copied) {
692		wrb = queue_head_node(txq);
693		unmap_tx_frag(dev, wrb, map_single);
694		map_single = false;
695		copied -= wrb->frag_len;
696		queue_head_inc(txq);
697	}
698	return 0;
699}
700
701static netdev_tx_t be_xmit(struct sk_buff *skb,
702			struct net_device *netdev)
703{
704	struct be_adapter *adapter = netdev_priv(netdev);
705	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
706	struct be_queue_info *txq = &txo->q;
707	u32 wrb_cnt = 0, copied = 0;
708	u32 start = txq->head;
709	bool dummy_wrb, stopped = false;
710
711	/* For vlan tagged pkts, BE
712	 * 1) calculates checksum even when CSO is not requested
713	 * 2) calculates checksum wrongly for padded pkt less than
714	 * 60 bytes long.
715	 * As a workaround disable TX vlan offloading in such cases.
716	 */
717	if (unlikely(vlan_tx_tag_present(skb) &&
718		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
719		skb = skb_share_check(skb, GFP_ATOMIC);
720		if (unlikely(!skb))
721			goto tx_drop;
722
723		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
724		if (unlikely(!skb))
725			goto tx_drop;
726
727		skb->vlan_tci = 0;
728	}
729
730	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
731
732	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
733	if (copied) {
734		int gso_segs = skb_shinfo(skb)->gso_segs;
735
736		/* record the sent skb in the sent_skb table */
737		BUG_ON(txo->sent_skb_list[start]);
738		txo->sent_skb_list[start] = skb;
739
740		/* Ensure txq has space for the next skb; Else stop the queue
741		 * *BEFORE* ringing the tx doorbell, so that we serialze the
742		 * tx compls of the current transmit which'll wake up the queue
743		 */
744		atomic_add(wrb_cnt, &txq->used);
745		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
746								txq->len) {
747			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
748			stopped = true;
749		}
750
751		be_txq_notify(adapter, txq->id, wrb_cnt);
752
753		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
754	} else {
755		txq->head = start;
756		dev_kfree_skb_any(skb);
757	}
758tx_drop:
759	return NETDEV_TX_OK;
760}
761
762static int be_change_mtu(struct net_device *netdev, int new_mtu)
763{
764	struct be_adapter *adapter = netdev_priv(netdev);
765	if (new_mtu < BE_MIN_MTU ||
766			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
767					(ETH_HLEN + ETH_FCS_LEN))) {
768		dev_info(&adapter->pdev->dev,
769			"MTU must be between %d and %d bytes\n",
770			BE_MIN_MTU,
771			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
772		return -EINVAL;
773	}
774	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
775			netdev->mtu, new_mtu);
776	netdev->mtu = new_mtu;
777	return 0;
778}
779
780/*
781 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
782 * If the user configures more, place BE in vlan promiscuous mode.
783 */
784static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
785{
786	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
787	u16 vtag[BE_NUM_VLANS_SUPPORTED];
788	u16 ntags = 0, i;
789	int status = 0;
790
791	if (vf) {
792		vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
793		status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
794					    1, 1, 0);
795	}
796
797	/* No need to further configure vids if in promiscuous mode */
798	if (adapter->promiscuous)
799		return 0;
800
801	if (adapter->vlans_added <= adapter->max_vlans)  {
802		/* Construct VLAN Table to give to HW */
803		for (i = 0; i < VLAN_N_VID; i++) {
804			if (adapter->vlan_tag[i]) {
805				vtag[ntags] = cpu_to_le16(i);
806				ntags++;
807			}
808		}
809		status = be_cmd_vlan_config(adapter, adapter->if_handle,
810					vtag, ntags, 1, 0);
811	} else {
812		status = be_cmd_vlan_config(adapter, adapter->if_handle,
813					NULL, 0, 1, 1);
814	}
815
816	return status;
817}
818
819static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
820{
821	struct be_adapter *adapter = netdev_priv(netdev);
822	int status = 0;
823
824	if (!be_physfn(adapter)) {
825		status = -EINVAL;
826		goto ret;
827	}
828
829	adapter->vlan_tag[vid] = 1;
830	if (adapter->vlans_added <= (adapter->max_vlans + 1))
831		status = be_vid_config(adapter, false, 0);
832
833	if (!status)
834		adapter->vlans_added++;
835	else
836		adapter->vlan_tag[vid] = 0;
837ret:
838	return status;
839}
840
841static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
842{
843	struct be_adapter *adapter = netdev_priv(netdev);
844	int status = 0;
845
846	if (!be_physfn(adapter)) {
847		status = -EINVAL;
848		goto ret;
849	}
850
851	adapter->vlan_tag[vid] = 0;
852	if (adapter->vlans_added <= adapter->max_vlans)
853		status = be_vid_config(adapter, false, 0);
854
855	if (!status)
856		adapter->vlans_added--;
857	else
858		adapter->vlan_tag[vid] = 1;
859ret:
860	return status;
861}
862
863static void be_set_rx_mode(struct net_device *netdev)
864{
865	struct be_adapter *adapter = netdev_priv(netdev);
866
867	if (netdev->flags & IFF_PROMISC) {
868		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
869		adapter->promiscuous = true;
870		goto done;
871	}
872
873	/* BE was previously in promiscuous mode; disable it */
874	if (adapter->promiscuous) {
875		adapter->promiscuous = false;
876		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
877
878		if (adapter->vlans_added)
879			be_vid_config(adapter, false, 0);
880	}
881
882	/* Enable multicast promisc if num configured exceeds what we support */
883	if (netdev->flags & IFF_ALLMULTI ||
884			netdev_mc_count(netdev) > BE_MAX_MC) {
885		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
886		goto done;
887	}
888
889	if (netdev_uc_count(netdev) != adapter->uc_macs) {
890		struct netdev_hw_addr *ha;
891		int i = 1; /* First slot is claimed by the Primary MAC */
892
893		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
894			be_cmd_pmac_del(adapter, adapter->if_handle,
895					adapter->pmac_id[i], 0);
896		}
897
898		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
899			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
900			adapter->promiscuous = true;
901			goto done;
902		}
903
904		netdev_for_each_uc_addr(ha, adapter->netdev) {
905			adapter->uc_macs++; /* First slot is for Primary MAC */
906			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
907					adapter->if_handle,
908					&adapter->pmac_id[adapter->uc_macs], 0);
909		}
910	}
911
912	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
913done:
914	return;
915}
916
917static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
918{
919	struct be_adapter *adapter = netdev_priv(netdev);
920	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
921	int status;
922
923	if (!sriov_enabled(adapter))
924		return -EPERM;
925
926	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
927		return -EINVAL;
928
929	if (lancer_chip(adapter)) {
930		status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
931	} else {
932		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
933					 vf_cfg->pmac_id, vf + 1);
934
935		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
936					 &vf_cfg->pmac_id, vf + 1);
937	}
938
939	if (status)
940		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
941				mac, vf);
942	else
943		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
944
945	return status;
946}
947
948static int be_get_vf_config(struct net_device *netdev, int vf,
949			struct ifla_vf_info *vi)
950{
951	struct be_adapter *adapter = netdev_priv(netdev);
952	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
953
954	if (!sriov_enabled(adapter))
955		return -EPERM;
956
957	if (vf >= adapter->num_vfs)
958		return -EINVAL;
959
960	vi->vf = vf;
961	vi->tx_rate = vf_cfg->tx_rate;
962	vi->vlan = vf_cfg->vlan_tag;
963	vi->qos = 0;
964	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
965
966	return 0;
967}
968
969static int be_set_vf_vlan(struct net_device *netdev,
970			int vf, u16 vlan, u8 qos)
971{
972	struct be_adapter *adapter = netdev_priv(netdev);
973	int status = 0;
974
975	if (!sriov_enabled(adapter))
976		return -EPERM;
977
978	if (vf >= adapter->num_vfs || vlan > 4095)
979		return -EINVAL;
980
981	if (vlan) {
982		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
983			/* If this is new value, program it. Else skip. */
984			adapter->vf_cfg[vf].vlan_tag = vlan;
985
986			status = be_cmd_set_hsw_config(adapter, vlan,
987				vf + 1, adapter->vf_cfg[vf].if_handle);
988		}
989	} else {
990		/* Reset Transparent Vlan Tagging. */
991		adapter->vf_cfg[vf].vlan_tag = 0;
992		vlan = adapter->vf_cfg[vf].def_vid;
993		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
994			adapter->vf_cfg[vf].if_handle);
995	}
996
997
998	if (status)
999		dev_info(&adapter->pdev->dev,
1000				"VLAN %d config on VF %d failed\n", vlan, vf);
1001	return status;
1002}
1003
1004static int be_set_vf_tx_rate(struct net_device *netdev,
1005			int vf, int rate)
1006{
1007	struct be_adapter *adapter = netdev_priv(netdev);
1008	int status = 0;
1009
1010	if (!sriov_enabled(adapter))
1011		return -EPERM;
1012
1013	if (vf >= adapter->num_vfs)
1014		return -EINVAL;
1015
1016	if (rate < 100 || rate > 10000) {
1017		dev_err(&adapter->pdev->dev,
1018			"tx rate must be between 100 and 10000 Mbps\n");
1019		return -EINVAL;
1020	}
1021
1022	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1023
1024	if (status)
1025		dev_err(&adapter->pdev->dev,
1026				"tx rate %d on VF %d failed\n", rate, vf);
1027	else
1028		adapter->vf_cfg[vf].tx_rate = rate;
1029	return status;
1030}
1031
1032static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
1033{
1034	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1035	ulong now = jiffies;
1036	ulong delta = now - stats->rx_jiffies;
1037	u64 pkts;
1038	unsigned int start, eqd;
1039
1040	if (!eqo->enable_aic) {
1041		eqd = eqo->eqd;
1042		goto modify_eqd;
1043	}
1044
1045	if (eqo->idx >= adapter->num_rx_qs)
1046		return;
1047
1048	stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1049
1050	/* Wrapped around */
1051	if (time_before(now, stats->rx_jiffies)) {
1052		stats->rx_jiffies = now;
1053		return;
1054	}
1055
1056	/* Update once a second */
1057	if (delta < HZ)
1058		return;
1059
1060	do {
1061		start = u64_stats_fetch_begin_bh(&stats->sync);
1062		pkts = stats->rx_pkts;
1063	} while (u64_stats_fetch_retry_bh(&stats->sync, start));
1064
1065	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
1066	stats->rx_pkts_prev = pkts;
1067	stats->rx_jiffies = now;
1068	eqd = (stats->rx_pps / 110000) << 3;
1069	eqd = min(eqd, eqo->max_eqd);
1070	eqd = max(eqd, eqo->min_eqd);
1071	if (eqd < 10)
1072		eqd = 0;
1073
1074modify_eqd:
1075	if (eqd != eqo->cur_eqd) {
1076		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1077		eqo->cur_eqd = eqd;
1078	}
1079}
1080
1081static void be_rx_stats_update(struct be_rx_obj *rxo,
1082		struct be_rx_compl_info *rxcp)
1083{
1084	struct be_rx_stats *stats = rx_stats(rxo);
1085
1086	u64_stats_update_begin(&stats->sync);
1087	stats->rx_compl++;
1088	stats->rx_bytes += rxcp->pkt_size;
1089	stats->rx_pkts++;
1090	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1091		stats->rx_mcast_pkts++;
1092	if (rxcp->err)
1093		stats->rx_compl_err++;
1094	u64_stats_update_end(&stats->sync);
1095}
1096
1097static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1098{
1099	/* L4 checksum is not reliable for non TCP/UDP packets.
1100	 * Also ignore ipcksm for ipv6 pkts */
1101	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1102				(rxcp->ip_csum || rxcp->ipv6);
1103}
1104
1105static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1106						u16 frag_idx)
1107{
1108	struct be_adapter *adapter = rxo->adapter;
1109	struct be_rx_page_info *rx_page_info;
1110	struct be_queue_info *rxq = &rxo->q;
1111
1112	rx_page_info = &rxo->page_info_tbl[frag_idx];
1113	BUG_ON(!rx_page_info->page);
1114
1115	if (rx_page_info->last_page_user) {
1116		dma_unmap_page(&adapter->pdev->dev,
1117			       dma_unmap_addr(rx_page_info, bus),
1118			       adapter->big_page_size, DMA_FROM_DEVICE);
1119		rx_page_info->last_page_user = false;
1120	}
1121
1122	atomic_dec(&rxq->used);
1123	return rx_page_info;
1124}
1125
1126/* Throwaway the data in the Rx completion */
1127static void be_rx_compl_discard(struct be_rx_obj *rxo,
1128				struct be_rx_compl_info *rxcp)
1129{
1130	struct be_queue_info *rxq = &rxo->q;
1131	struct be_rx_page_info *page_info;
1132	u16 i, num_rcvd = rxcp->num_rcvd;
1133
1134	for (i = 0; i < num_rcvd; i++) {
1135		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1136		put_page(page_info->page);
1137		memset(page_info, 0, sizeof(*page_info));
1138		index_inc(&rxcp->rxq_idx, rxq->len);
1139	}
1140}
1141
1142/*
1143 * skb_fill_rx_data forms a complete skb for an ether frame
1144 * indicated by rxcp.
1145 */
1146static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1147			     struct be_rx_compl_info *rxcp)
1148{
1149	struct be_queue_info *rxq = &rxo->q;
1150	struct be_rx_page_info *page_info;
1151	u16 i, j;
1152	u16 hdr_len, curr_frag_len, remaining;
1153	u8 *start;
1154
1155	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1156	start = page_address(page_info->page) + page_info->page_offset;
1157	prefetch(start);
1158
1159	/* Copy data in the first descriptor of this completion */
1160	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1161
1162	/* Copy the header portion into skb_data */
1163	hdr_len = min(BE_HDR_LEN, curr_frag_len);
1164	memcpy(skb->data, start, hdr_len);
1165	skb->len = curr_frag_len;
1166	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1167		/* Complete packet has now been moved to data */
1168		put_page(page_info->page);
1169		skb->data_len = 0;
1170		skb->tail += curr_frag_len;
1171	} else {
1172		skb_shinfo(skb)->nr_frags = 1;
1173		skb_frag_set_page(skb, 0, page_info->page);
1174		skb_shinfo(skb)->frags[0].page_offset =
1175					page_info->page_offset + hdr_len;
1176		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1177		skb->data_len = curr_frag_len - hdr_len;
1178		skb->truesize += rx_frag_size;
1179		skb->tail += hdr_len;
1180	}
1181	page_info->page = NULL;
1182
1183	if (rxcp->pkt_size <= rx_frag_size) {
1184		BUG_ON(rxcp->num_rcvd != 1);
1185		return;
1186	}
1187
1188	/* More frags present for this completion */
1189	index_inc(&rxcp->rxq_idx, rxq->len);
1190	remaining = rxcp->pkt_size - curr_frag_len;
1191	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1192		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1193		curr_frag_len = min(remaining, rx_frag_size);
1194
1195		/* Coalesce all frags from the same physical page in one slot */
1196		if (page_info->page_offset == 0) {
1197			/* Fresh page */
1198			j++;
1199			skb_frag_set_page(skb, j, page_info->page);
1200			skb_shinfo(skb)->frags[j].page_offset =
1201							page_info->page_offset;
1202			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1203			skb_shinfo(skb)->nr_frags++;
1204		} else {
1205			put_page(page_info->page);
1206		}
1207
1208		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1209		skb->len += curr_frag_len;
1210		skb->data_len += curr_frag_len;
1211		skb->truesize += rx_frag_size;
1212		remaining -= curr_frag_len;
1213		index_inc(&rxcp->rxq_idx, rxq->len);
1214		page_info->page = NULL;
1215	}
1216	BUG_ON(j > MAX_SKB_FRAGS);
1217}
1218
1219/* Process the RX completion indicated by rxcp when GRO is disabled */
1220static void be_rx_compl_process(struct be_rx_obj *rxo,
1221				struct be_rx_compl_info *rxcp)
1222{
1223	struct be_adapter *adapter = rxo->adapter;
1224	struct net_device *netdev = adapter->netdev;
1225	struct sk_buff *skb;
1226
1227	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1228	if (unlikely(!skb)) {
1229		rx_stats(rxo)->rx_drops_no_skbs++;
1230		be_rx_compl_discard(rxo, rxcp);
1231		return;
1232	}
1233
1234	skb_fill_rx_data(rxo, skb, rxcp);
1235
1236	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1237		skb->ip_summed = CHECKSUM_UNNECESSARY;
1238	else
1239		skb_checksum_none_assert(skb);
1240
1241	skb->protocol = eth_type_trans(skb, netdev);
1242	if (netdev->features & NETIF_F_RXHASH)
1243		skb->rxhash = rxcp->rss_hash;
1244
1245
1246	if (rxcp->vlanf)
1247		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1248
1249	netif_receive_skb(skb);
1250}
1251
1252/* Process the RX completion indicated by rxcp when GRO is enabled */
1253void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1254			     struct be_rx_compl_info *rxcp)
1255{
1256	struct be_adapter *adapter = rxo->adapter;
1257	struct be_rx_page_info *page_info;
1258	struct sk_buff *skb = NULL;
1259	struct be_queue_info *rxq = &rxo->q;
1260	u16 remaining, curr_frag_len;
1261	u16 i, j;
1262
1263	skb = napi_get_frags(napi);
1264	if (!skb) {
1265		be_rx_compl_discard(rxo, rxcp);
1266		return;
1267	}
1268
1269	remaining = rxcp->pkt_size;
1270	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1271		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1272
1273		curr_frag_len = min(remaining, rx_frag_size);
1274
1275		/* Coalesce all frags from the same physical page in one slot */
1276		if (i == 0 || page_info->page_offset == 0) {
1277			/* First frag or Fresh page */
1278			j++;
1279			skb_frag_set_page(skb, j, page_info->page);
1280			skb_shinfo(skb)->frags[j].page_offset =
1281							page_info->page_offset;
1282			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1283		} else {
1284			put_page(page_info->page);
1285		}
1286		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1287		skb->truesize += rx_frag_size;
1288		remaining -= curr_frag_len;
1289		index_inc(&rxcp->rxq_idx, rxq->len);
1290		memset(page_info, 0, sizeof(*page_info));
1291	}
1292	BUG_ON(j > MAX_SKB_FRAGS);
1293
1294	skb_shinfo(skb)->nr_frags = j + 1;
1295	skb->len = rxcp->pkt_size;
1296	skb->data_len = rxcp->pkt_size;
1297	skb->ip_summed = CHECKSUM_UNNECESSARY;
1298	if (adapter->netdev->features & NETIF_F_RXHASH)
1299		skb->rxhash = rxcp->rss_hash;
1300
1301	if (rxcp->vlanf)
1302		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1303
1304	napi_gro_frags(napi);
1305}
1306
1307static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1308				 struct be_rx_compl_info *rxcp)
1309{
1310	rxcp->pkt_size =
1311		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1312	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1313	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1314	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1315	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1316	rxcp->ip_csum =
1317		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1318	rxcp->l4_csum =
1319		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1320	rxcp->ipv6 =
1321		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1322	rxcp->rxq_idx =
1323		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1324	rxcp->num_rcvd =
1325		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1326	rxcp->pkt_type =
1327		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1328	rxcp->rss_hash =
1329		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
1330	if (rxcp->vlanf) {
1331		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1332					  compl);
1333		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1334					       compl);
1335	}
1336	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1337}
1338
1339static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1340				 struct be_rx_compl_info *rxcp)
1341{
1342	rxcp->pkt_size =
1343		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1344	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1345	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1346	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1347	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1348	rxcp->ip_csum =
1349		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1350	rxcp->l4_csum =
1351		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1352	rxcp->ipv6 =
1353		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1354	rxcp->rxq_idx =
1355		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1356	rxcp->num_rcvd =
1357		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1358	rxcp->pkt_type =
1359		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1360	rxcp->rss_hash =
1361		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
1362	if (rxcp->vlanf) {
1363		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1364					  compl);
1365		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1366					       compl);
1367	}
1368	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1369}
1370
1371static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1372{
1373	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1374	struct be_rx_compl_info *rxcp = &rxo->rxcp;
1375	struct be_adapter *adapter = rxo->adapter;
1376
1377	/* For checking the valid bit it is Ok to use either definition as the
1378	 * valid bit is at the same position in both v0 and v1 Rx compl */
1379	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1380		return NULL;
1381
1382	rmb();
1383	be_dws_le_to_cpu(compl, sizeof(*compl));
1384
1385	if (adapter->be3_native)
1386		be_parse_rx_compl_v1(compl, rxcp);
1387	else
1388		be_parse_rx_compl_v0(compl, rxcp);
1389
1390	if (rxcp->vlanf) {
1391		/* vlanf could be wrongly set in some cards.
1392		 * ignore if vtm is not set */
1393		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1394			rxcp->vlanf = 0;
1395
1396		if (!lancer_chip(adapter))
1397			rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1398
1399		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1400		    !adapter->vlan_tag[rxcp->vlan_tag])
1401			rxcp->vlanf = 0;
1402	}
1403
1404	/* As the compl has been parsed, reset it; we wont touch it again */
1405	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1406
1407	queue_tail_inc(&rxo->cq);
1408	return rxcp;
1409}
1410
1411static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1412{
1413	u32 order = get_order(size);
1414
1415	if (order > 0)
1416		gfp |= __GFP_COMP;
1417	return  alloc_pages(gfp, order);
1418}
1419
1420/*
1421 * Allocate a page, split it to fragments of size rx_frag_size and post as
1422 * receive buffers to BE
1423 */
1424static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1425{
1426	struct be_adapter *adapter = rxo->adapter;
1427	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1428	struct be_queue_info *rxq = &rxo->q;
1429	struct page *pagep = NULL;
1430	struct be_eth_rx_d *rxd;
1431	u64 page_dmaaddr = 0, frag_dmaaddr;
1432	u32 posted, page_offset = 0;
1433
1434	page_info = &rxo->page_info_tbl[rxq->head];
1435	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1436		if (!pagep) {
1437			pagep = be_alloc_pages(adapter->big_page_size, gfp);
1438			if (unlikely(!pagep)) {
1439				rx_stats(rxo)->rx_post_fail++;
1440				break;
1441			}
1442			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1443						    0, adapter->big_page_size,
1444						    DMA_FROM_DEVICE);
1445			page_info->page_offset = 0;
1446		} else {
1447			get_page(pagep);
1448			page_info->page_offset = page_offset + rx_frag_size;
1449		}
1450		page_offset = page_info->page_offset;
1451		page_info->page = pagep;
1452		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1453		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1454
1455		rxd = queue_head_node(rxq);
1456		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1457		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1458
1459		/* Any space left in the current big page for another frag? */
1460		if ((page_offset + rx_frag_size + rx_frag_size) >
1461					adapter->big_page_size) {
1462			pagep = NULL;
1463			page_info->last_page_user = true;
1464		}
1465
1466		prev_page_info = page_info;
1467		queue_head_inc(rxq);
1468		page_info = &rxo->page_info_tbl[rxq->head];
1469	}
1470	if (pagep)
1471		prev_page_info->last_page_user = true;
1472
1473	if (posted) {
1474		atomic_add(posted, &rxq->used);
1475		be_rxq_notify(adapter, rxq->id, posted);
1476	} else if (atomic_read(&rxq->used) == 0) {
1477		/* Let be_worker replenish when memory is available */
1478		rxo->rx_post_starved = true;
1479	}
1480}
1481
1482static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1483{
1484	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1485
1486	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1487		return NULL;
1488
1489	rmb();
1490	be_dws_le_to_cpu(txcp, sizeof(*txcp));
1491
1492	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1493
1494	queue_tail_inc(tx_cq);
1495	return txcp;
1496}
1497
1498static u16 be_tx_compl_process(struct be_adapter *adapter,
1499		struct be_tx_obj *txo, u16 last_index)
1500{
1501	struct be_queue_info *txq = &txo->q;
1502	struct be_eth_wrb *wrb;
1503	struct sk_buff **sent_skbs = txo->sent_skb_list;
1504	struct sk_buff *sent_skb;
1505	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1506	bool unmap_skb_hdr = true;
1507
1508	sent_skb = sent_skbs[txq->tail];
1509	BUG_ON(!sent_skb);
1510	sent_skbs[txq->tail] = NULL;
1511
1512	/* skip header wrb */
1513	queue_tail_inc(txq);
1514
1515	do {
1516		cur_index = txq->tail;
1517		wrb = queue_tail_node(txq);
1518		unmap_tx_frag(&adapter->pdev->dev, wrb,
1519			      (unmap_skb_hdr && skb_headlen(sent_skb)));
1520		unmap_skb_hdr = false;
1521
1522		num_wrbs++;
1523		queue_tail_inc(txq);
1524	} while (cur_index != last_index);
1525
1526	kfree_skb(sent_skb);
1527	return num_wrbs;
1528}
1529
1530/* Return the number of events in the event queue */
1531static inline int events_get(struct be_eq_obj *eqo)
1532{
1533	struct be_eq_entry *eqe;
1534	int num = 0;
1535
1536	do {
1537		eqe = queue_tail_node(&eqo->q);
1538		if (eqe->evt == 0)
1539			break;
1540
1541		rmb();
1542		eqe->evt = 0;
1543		num++;
1544		queue_tail_inc(&eqo->q);
1545	} while (true);
1546
1547	return num;
1548}
1549
1550static int event_handle(struct be_eq_obj *eqo)
1551{
1552	bool rearm = false;
1553	int num = events_get(eqo);
1554
1555	/* Deal with any spurious interrupts that come without events */
1556	if (!num)
1557		rearm = true;
1558
1559	be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1560	if (num)
1561		napi_schedule(&eqo->napi);
1562
1563	return num;
1564}
1565
1566/* Leaves the EQ is disarmed state */
1567static void be_eq_clean(struct be_eq_obj *eqo)
1568{
1569	int num = events_get(eqo);
1570
1571	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1572}
1573
1574static void be_rx_cq_clean(struct be_rx_obj *rxo)
1575{
1576	struct be_rx_page_info *page_info;
1577	struct be_queue_info *rxq = &rxo->q;
1578	struct be_queue_info *rx_cq = &rxo->cq;
1579	struct be_rx_compl_info *rxcp;
1580	u16 tail;
1581
1582	/* First cleanup pending rx completions */
1583	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1584		be_rx_compl_discard(rxo, rxcp);
1585		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
1586	}
1587
1588	/* Then free posted rx buffer that were not used */
1589	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1590	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1591		page_info = get_rx_page_info(rxo, tail);
1592		put_page(page_info->page);
1593		memset(page_info, 0, sizeof(*page_info));
1594	}
1595	BUG_ON(atomic_read(&rxq->used));
1596	rxq->tail = rxq->head = 0;
1597}
1598
1599static void be_tx_compl_clean(struct be_adapter *adapter)
1600{
1601	struct be_tx_obj *txo;
1602	struct be_queue_info *txq;
1603	struct be_eth_tx_compl *txcp;
1604	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1605	struct sk_buff *sent_skb;
1606	bool dummy_wrb;
1607	int i, pending_txqs;
1608
1609	/* Wait for a max of 200ms for all the tx-completions to arrive. */
1610	do {
1611		pending_txqs = adapter->num_tx_qs;
1612
1613		for_all_tx_queues(adapter, txo, i) {
1614			txq = &txo->q;
1615			while ((txcp = be_tx_compl_get(&txo->cq))) {
1616				end_idx =
1617					AMAP_GET_BITS(struct amap_eth_tx_compl,
1618						      wrb_index, txcp);
1619				num_wrbs += be_tx_compl_process(adapter, txo,
1620								end_idx);
1621				cmpl++;
1622			}
1623			if (cmpl) {
1624				be_cq_notify(adapter, txo->cq.id, false, cmpl);
1625				atomic_sub(num_wrbs, &txq->used);
1626				cmpl = 0;
1627				num_wrbs = 0;
1628			}
1629			if (atomic_read(&txq->used) == 0)
1630				pending_txqs--;
1631		}
1632
1633		if (pending_txqs == 0 || ++timeo > 200)
1634			break;
1635
1636		mdelay(1);
1637	} while (true);
1638
1639	for_all_tx_queues(adapter, txo, i) {
1640		txq = &txo->q;
1641		if (atomic_read(&txq->used))
1642			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1643				atomic_read(&txq->used));
1644
1645		/* free posted tx for which compls will never arrive */
1646		while (atomic_read(&txq->used)) {
1647			sent_skb = txo->sent_skb_list[txq->tail];
1648			end_idx = txq->tail;
1649			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1650						   &dummy_wrb);
1651			index_adv(&end_idx, num_wrbs - 1, txq->len);
1652			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1653			atomic_sub(num_wrbs, &txq->used);
1654		}
1655	}
1656}
1657
1658static void be_evt_queues_destroy(struct be_adapter *adapter)
1659{
1660	struct be_eq_obj *eqo;
1661	int i;
1662
1663	for_all_evt_queues(adapter, eqo, i) {
1664		be_eq_clean(eqo);
1665		if (eqo->q.created)
1666			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1667		be_queue_free(adapter, &eqo->q);
1668	}
1669}
1670
1671static int be_evt_queues_create(struct be_adapter *adapter)
1672{
1673	struct be_queue_info *eq;
1674	struct be_eq_obj *eqo;
1675	int i, rc;
1676
1677	adapter->num_evt_qs = num_irqs(adapter);
1678
1679	for_all_evt_queues(adapter, eqo, i) {
1680		eqo->adapter = adapter;
1681		eqo->tx_budget = BE_TX_BUDGET;
1682		eqo->idx = i;
1683		eqo->max_eqd = BE_MAX_EQD;
1684		eqo->enable_aic = true;
1685
1686		eq = &eqo->q;
1687		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1688					sizeof(struct be_eq_entry));
1689		if (rc)
1690			return rc;
1691
1692		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1693		if (rc)
1694			return rc;
1695	}
1696	return 0;
1697}
1698
1699static void be_mcc_queues_destroy(struct be_adapter *adapter)
1700{
1701	struct be_queue_info *q;
1702
1703	q = &adapter->mcc_obj.q;
1704	if (q->created)
1705		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1706	be_queue_free(adapter, q);
1707
1708	q = &adapter->mcc_obj.cq;
1709	if (q->created)
1710		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1711	be_queue_free(adapter, q);
1712}
1713
1714/* Must be called only after TX qs are created as MCC shares TX EQ */
1715static int be_mcc_queues_create(struct be_adapter *adapter)
1716{
1717	struct be_queue_info *q, *cq;
1718
1719	cq = &adapter->mcc_obj.cq;
1720	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1721			sizeof(struct be_mcc_compl)))
1722		goto err;
1723
1724	/* Use the default EQ for MCC completions */
1725	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1726		goto mcc_cq_free;
1727
1728	q = &adapter->mcc_obj.q;
1729	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1730		goto mcc_cq_destroy;
1731
1732	if (be_cmd_mccq_create(adapter, q, cq))
1733		goto mcc_q_free;
1734
1735	return 0;
1736
1737mcc_q_free:
1738	be_queue_free(adapter, q);
1739mcc_cq_destroy:
1740	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1741mcc_cq_free:
1742	be_queue_free(adapter, cq);
1743err:
1744	return -1;
1745}
1746
1747static void be_tx_queues_destroy(struct be_adapter *adapter)
1748{
1749	struct be_queue_info *q;
1750	struct be_tx_obj *txo;
1751	u8 i;
1752
1753	for_all_tx_queues(adapter, txo, i) {
1754		q = &txo->q;
1755		if (q->created)
1756			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1757		be_queue_free(adapter, q);
1758
1759		q = &txo->cq;
1760		if (q->created)
1761			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1762		be_queue_free(adapter, q);
1763	}
1764}
1765
1766static int be_num_txqs_want(struct be_adapter *adapter)
1767{
1768	if (sriov_enabled(adapter) || be_is_mc(adapter) ||
1769		lancer_chip(adapter) || !be_physfn(adapter) ||
1770		adapter->generation == BE_GEN2)
1771		return 1;
1772	else
1773		return MAX_TX_QS;
1774}
1775
1776static int be_tx_cqs_create(struct be_adapter *adapter)
1777{
1778	struct be_queue_info *cq, *eq;
1779	int status;
1780	struct be_tx_obj *txo;
1781	u8 i;
1782
1783	adapter->num_tx_qs = be_num_txqs_want(adapter);
1784	if (adapter->num_tx_qs != MAX_TX_QS) {
1785		rtnl_lock();
1786		netif_set_real_num_tx_queues(adapter->netdev,
1787			adapter->num_tx_qs);
1788		rtnl_unlock();
1789	}
1790
1791	for_all_tx_queues(adapter, txo, i) {
1792		cq = &txo->cq;
1793		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1794					sizeof(struct be_eth_tx_compl));
1795		if (status)
1796			return status;
1797
1798		/* If num_evt_qs is less than num_tx_qs, then more than
1799		 * one txq share an eq
1800		 */
1801		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1802		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1803		if (status)
1804			return status;
1805	}
1806	return 0;
1807}
1808
1809static int be_tx_qs_create(struct be_adapter *adapter)
1810{
1811	struct be_tx_obj *txo;
1812	int i, status;
1813
1814	for_all_tx_queues(adapter, txo, i) {
1815		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1816					sizeof(struct be_eth_wrb));
1817		if (status)
1818			return status;
1819
1820		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1821		if (status)
1822			return status;
1823	}
1824
1825	return 0;
1826}
1827
1828static void be_rx_cqs_destroy(struct be_adapter *adapter)
1829{
1830	struct be_queue_info *q;
1831	struct be_rx_obj *rxo;
1832	int i;
1833
1834	for_all_rx_queues(adapter, rxo, i) {
1835		q = &rxo->cq;
1836		if (q->created)
1837			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1838		be_queue_free(adapter, q);
1839	}
1840}
1841
1842static int be_rx_cqs_create(struct be_adapter *adapter)
1843{
1844	struct be_queue_info *eq, *cq;
1845	struct be_rx_obj *rxo;
1846	int rc, i;
1847
1848	/* We'll create as many RSS rings as there are irqs.
1849	 * But when there's only one irq there's no use creating RSS rings
1850	 */
1851	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1852				num_irqs(adapter) + 1 : 1;
1853
1854	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1855	for_all_rx_queues(adapter, rxo, i) {
1856		rxo->adapter = adapter;
1857		cq = &rxo->cq;
1858		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1859				sizeof(struct be_eth_rx_compl));
1860		if (rc)
1861			return rc;
1862
1863		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1864		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
1865		if (rc)
1866			return rc;
1867	}
1868
1869	if (adapter->num_rx_qs != MAX_RX_QS)
1870		dev_info(&adapter->pdev->dev,
1871			"Created only %d receive queues", adapter->num_rx_qs);
1872
1873	return 0;
1874}
1875
1876static irqreturn_t be_intx(int irq, void *dev)
1877{
1878	struct be_adapter *adapter = dev;
1879	int num_evts;
1880
1881	/* With INTx only one EQ is used */
1882	num_evts = event_handle(&adapter->eq_obj[0]);
1883	if (num_evts)
1884		return IRQ_HANDLED;
1885	else
1886		return IRQ_NONE;
1887}
1888
1889static irqreturn_t be_msix(int irq, void *dev)
1890{
1891	struct be_eq_obj *eqo = dev;
1892
1893	event_handle(eqo);
1894	return IRQ_HANDLED;
1895}
1896
1897static inline bool do_gro(struct be_rx_compl_info *rxcp)
1898{
1899	return (rxcp->tcpf && !rxcp->err) ? true : false;
1900}
1901
1902static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
1903			int budget)
1904{
1905	struct be_adapter *adapter = rxo->adapter;
1906	struct be_queue_info *rx_cq = &rxo->cq;
1907	struct be_rx_compl_info *rxcp;
1908	u32 work_done;
1909
1910	for (work_done = 0; work_done < budget; work_done++) {
1911		rxcp = be_rx_compl_get(rxo);
1912		if (!rxcp)
1913			break;
1914
1915		/* Is it a flush compl that has no data */
1916		if (unlikely(rxcp->num_rcvd == 0))
1917			goto loop_continue;
1918
1919		/* Discard compl with partial DMA Lancer B0 */
1920		if (unlikely(!rxcp->pkt_size)) {
1921			be_rx_compl_discard(rxo, rxcp);
1922			goto loop_continue;
1923		}
1924
1925		/* On BE drop pkts that arrive due to imperfect filtering in
1926		 * promiscuous mode on some skews
1927		 */
1928		if (unlikely(rxcp->port != adapter->port_num &&
1929				!lancer_chip(adapter))) {
1930			be_rx_compl_discard(rxo, rxcp);
1931			goto loop_continue;
1932		}
1933
1934		if (do_gro(rxcp))
1935			be_rx_compl_process_gro(rxo, napi, rxcp);
1936		else
1937			be_rx_compl_process(rxo, rxcp);
1938loop_continue:
1939		be_rx_stats_update(rxo, rxcp);
1940	}
1941
1942	if (work_done) {
1943		be_cq_notify(adapter, rx_cq->id, true, work_done);
1944
1945		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1946			be_post_rx_frags(rxo, GFP_ATOMIC);
1947	}
1948
1949	return work_done;
1950}
1951
1952static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
1953			  int budget, int idx)
1954{
1955	struct be_eth_tx_compl *txcp;
1956	int num_wrbs = 0, work_done;
1957
1958	for (work_done = 0; work_done < budget; work_done++) {
1959		txcp = be_tx_compl_get(&txo->cq);
1960		if (!txcp)
1961			break;
1962		num_wrbs += be_tx_compl_process(adapter, txo,
1963				AMAP_GET_BITS(struct amap_eth_tx_compl,
1964					wrb_index, txcp));
1965	}
1966
1967	if (work_done) {
1968		be_cq_notify(adapter, txo->cq.id, true, work_done);
1969		atomic_sub(num_wrbs, &txo->q.used);
1970
1971		/* As Tx wrbs have been freed up, wake up netdev queue
1972		 * if it was stopped due to lack of tx wrbs.  */
1973		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
1974			atomic_read(&txo->q.used) < txo->q.len / 2) {
1975			netif_wake_subqueue(adapter->netdev, idx);
1976		}
1977
1978		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
1979		tx_stats(txo)->tx_compl += work_done;
1980		u64_stats_update_end(&tx_stats(txo)->sync_compl);
1981	}
1982	return (work_done < budget); /* Done */
1983}
1984
1985int be_poll(struct napi_struct *napi, int budget)
1986{
1987	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
1988	struct be_adapter *adapter = eqo->adapter;
1989	int max_work = 0, work, i;
1990	bool tx_done;
1991
1992	/* Process all TXQs serviced by this EQ */
1993	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
1994		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
1995					eqo->tx_budget, i);
1996		if (!tx_done)
1997			max_work = budget;
1998	}
1999
2000	/* This loop will iterate twice for EQ0 in which
2001	 * completions of the last RXQ (default one) are also processed
2002	 * For other EQs the loop iterates only once
2003	 */
2004	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2005		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2006		max_work = max(work, max_work);
2007	}
2008
2009	if (is_mcc_eqo(eqo))
2010		be_process_mcc(adapter);
2011
2012	if (max_work < budget) {
2013		napi_complete(napi);
2014		be_eq_notify(adapter, eqo->q.id, true, false, 0);
2015	} else {
2016		/* As we'll continue in polling mode, count and clear events */
2017		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
2018	}
2019	return max_work;
2020}
2021
2022void be_detect_dump_ue(struct be_adapter *adapter)
2023{
2024	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2025	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2026	u32 i;
2027
2028	if (adapter->eeh_err || adapter->ue_detected)
2029		return;
2030
2031	if (lancer_chip(adapter)) {
2032		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2033		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2034			sliport_err1 = ioread32(adapter->db +
2035					SLIPORT_ERROR1_OFFSET);
2036			sliport_err2 = ioread32(adapter->db +
2037					SLIPORT_ERROR2_OFFSET);
2038		}
2039	} else {
2040		pci_read_config_dword(adapter->pdev,
2041				PCICFG_UE_STATUS_LOW, &ue_lo);
2042		pci_read_config_dword(adapter->pdev,
2043				PCICFG_UE_STATUS_HIGH, &ue_hi);
2044		pci_read_config_dword(adapter->pdev,
2045				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2046		pci_read_config_dword(adapter->pdev,
2047				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2048
2049		ue_lo = (ue_lo & (~ue_lo_mask));
2050		ue_hi = (ue_hi & (~ue_hi_mask));
2051	}
2052
2053	if (ue_lo || ue_hi ||
2054		sliport_status & SLIPORT_STATUS_ERR_MASK) {
2055		adapter->ue_detected = true;
2056		adapter->eeh_err = true;
2057		dev_err(&adapter->pdev->dev,
2058			"Unrecoverable error in the card\n");
2059	}
2060
2061	if (ue_lo) {
2062		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2063			if (ue_lo & 1)
2064				dev_err(&adapter->pdev->dev,
2065				"UE: %s bit set\n", ue_status_low_desc[i]);
2066		}
2067	}
2068	if (ue_hi) {
2069		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2070			if (ue_hi & 1)
2071				dev_err(&adapter->pdev->dev,
2072				"UE: %s bit set\n", ue_status_hi_desc[i]);
2073		}
2074	}
2075
2076	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2077		dev_err(&adapter->pdev->dev,
2078			"sliport status 0x%x\n", sliport_status);
2079		dev_err(&adapter->pdev->dev,
2080			"sliport error1 0x%x\n", sliport_err1);
2081		dev_err(&adapter->pdev->dev,
2082			"sliport error2 0x%x\n", sliport_err2);
2083	}
2084}
2085
2086static void be_msix_disable(struct be_adapter *adapter)
2087{
2088	if (msix_enabled(adapter)) {
2089		pci_disable_msix(adapter->pdev);
2090		adapter->num_msix_vec = 0;
2091	}
2092}
2093
2094static uint be_num_rss_want(struct be_adapter *adapter)
2095{
2096	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2097	     adapter->num_vfs == 0 && be_physfn(adapter) &&
2098	     !be_is_mc(adapter))
2099		return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2100	else
2101		return 0;
2102}
2103
2104static void be_msix_enable(struct be_adapter *adapter)
2105{
2106#define BE_MIN_MSIX_VECTORS		1
2107	int i, status, num_vec;
2108
2109	/* If RSS queues are not used, need a vec for default RX Q */
2110	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2111	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2112
2113	for (i = 0; i < num_vec; i++)
2114		adapter->msix_entries[i].entry = i;
2115
2116	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2117	if (status == 0) {
2118		goto done;
2119	} else if (status >= BE_MIN_MSIX_VECTORS) {
2120		num_vec = status;
2121		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2122				num_vec) == 0)
2123			goto done;
2124	}
2125	return;
2126done:
2127	adapter->num_msix_vec = num_vec;
2128	return;
2129}
2130
2131static int be_sriov_enable(struct be_adapter *adapter)
2132{
2133	be_check_sriov_fn_type(adapter);
2134
2135#ifdef CONFIG_PCI_IOV
2136	if (be_physfn(adapter) && num_vfs) {
2137		int status, pos;
2138		u16 dev_vfs;
2139
2140		pos = pci_find_ext_capability(adapter->pdev,
2141						PCI_EXT_CAP_ID_SRIOV);
2142		pci_read_config_word(adapter->pdev,
2143				     pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);
2144
2145		adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
2146		if (adapter->num_vfs != num_vfs)
2147			dev_info(&adapter->pdev->dev,
2148				 "Device supports %d VFs and not %d\n",
2149				 adapter->num_vfs, num_vfs);
2150
2151		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2152		if (status)
2153			adapter->num_vfs = 0;
2154
2155		if (adapter->num_vfs) {
2156			adapter->vf_cfg = kcalloc(num_vfs,
2157						sizeof(struct be_vf_cfg),
2158						GFP_KERNEL);
2159			if (!adapter->vf_cfg)
2160				return -ENOMEM;
2161		}
2162	}
2163#endif
2164	return 0;
2165}
2166
2167static void be_sriov_disable(struct be_adapter *adapter)
2168{
2169#ifdef CONFIG_PCI_IOV
2170	if (sriov_enabled(adapter)) {
2171		pci_disable_sriov(adapter->pdev);
2172		kfree(adapter->vf_cfg);
2173		adapter->num_vfs = 0;
2174	}
2175#endif
2176}
2177
2178static inline int be_msix_vec_get(struct be_adapter *adapter,
2179				struct be_eq_obj *eqo)
2180{
2181	return adapter->msix_entries[eqo->idx].vector;
2182}
2183
2184static int be_msix_register(struct be_adapter *adapter)
2185{
2186	struct net_device *netdev = adapter->netdev;
2187	struct be_eq_obj *eqo;
2188	int status, i, vec;
2189
2190	for_all_evt_queues(adapter, eqo, i) {
2191		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2192		vec = be_msix_vec_get(adapter, eqo);
2193		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2194		if (status)
2195			goto err_msix;
2196	}
2197
2198	return 0;
2199err_msix:
2200	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2201		free_irq(be_msix_vec_get(adapter, eqo), eqo);
2202	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2203		status);
2204	be_msix_disable(adapter);
2205	return status;
2206}
2207
2208static int be_irq_register(struct be_adapter *adapter)
2209{
2210	struct net_device *netdev = adapter->netdev;
2211	int status;
2212
2213	if (msix_enabled(adapter)) {
2214		status = be_msix_register(adapter);
2215		if (status == 0)
2216			goto done;
2217		/* INTx is not supported for VF */
2218		if (!be_physfn(adapter))
2219			return status;
2220	}
2221
2222	/* INTx */
2223	netdev->irq = adapter->pdev->irq;
2224	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2225			adapter);
2226	if (status) {
2227		dev_err(&adapter->pdev->dev,
2228			"INTx request IRQ failed - err %d\n", status);
2229		return status;
2230	}
2231done:
2232	adapter->isr_registered = true;
2233	return 0;
2234}
2235
2236static void be_irq_unregister(struct be_adapter *adapter)
2237{
2238	struct net_device *netdev = adapter->netdev;
2239	struct be_eq_obj *eqo;
2240	int i;
2241
2242	if (!adapter->isr_registered)
2243		return;
2244
2245	/* INTx */
2246	if (!msix_enabled(adapter)) {
2247		free_irq(netdev->irq, adapter);
2248		goto done;
2249	}
2250
2251	/* MSIx */
2252	for_all_evt_queues(adapter, eqo, i)
2253		free_irq(be_msix_vec_get(adapter, eqo), eqo);
2254
2255done:
2256	adapter->isr_registered = false;
2257}
2258
2259static void be_rx_qs_destroy(struct be_adapter *adapter)
2260{
2261	struct be_queue_info *q;
2262	struct be_rx_obj *rxo;
2263	int i;
2264
2265	for_all_rx_queues(adapter, rxo, i) {
2266		q = &rxo->q;
2267		if (q->created) {
2268			be_cmd_rxq_destroy(adapter, q);
2269			/* After the rxq is invalidated, wait for a grace time
2270			 * of 1ms for all dma to end and the flush compl to
2271			 * arrive
2272			 */
2273			mdelay(1);
2274			be_rx_cq_clean(rxo);
2275		}
2276		be_queue_free(adapter, q);
2277	}
2278}
2279
2280static int be_close(struct net_device *netdev)
2281{
2282	struct be_adapter *adapter = netdev_priv(netdev);
2283	struct be_eq_obj *eqo;
2284	int i;
2285
2286	be_async_mcc_disable(adapter);
2287
2288	if (!lancer_chip(adapter))
2289		be_intr_set(adapter, false);
2290
2291	for_all_evt_queues(adapter, eqo, i) {
2292		napi_disable(&eqo->napi);
2293		if (msix_enabled(adapter))
2294			synchronize_irq(be_msix_vec_get(adapter, eqo));
2295		else
2296			synchronize_irq(netdev->irq);
2297		be_eq_clean(eqo);
2298	}
2299
2300	be_irq_unregister(adapter);
2301
2302	/* Wait for all pending tx completions to arrive so that
2303	 * all tx skbs are freed.
2304	 */
2305	be_tx_compl_clean(adapter);
2306
2307	be_rx_qs_destroy(adapter);
2308	return 0;
2309}
2310
2311static int be_rx_qs_create(struct be_adapter *adapter)
2312{
2313	struct be_rx_obj *rxo;
2314	int rc, i, j;
2315	u8 rsstable[128];
2316
2317	for_all_rx_queues(adapter, rxo, i) {
2318		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2319				    sizeof(struct be_eth_rx_d));
2320		if (rc)
2321			return rc;
2322	}
2323
2324	/* The FW would like the default RXQ to be created first */
2325	rxo = default_rxo(adapter);
2326	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2327			       adapter->if_handle, false, &rxo->rss_id);
2328	if (rc)
2329		return rc;
2330
2331	for_all_rss_queues(adapter, rxo, i) {
2332		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2333				       rx_frag_size, adapter->if_handle,
2334				       true, &rxo->rss_id);
2335		if (rc)
2336			return rc;
2337	}
2338
2339	if (be_multi_rxq(adapter)) {
2340		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2341			for_all_rss_queues(adapter, rxo, i) {
2342				if ((j + i) >= 128)
2343					break;
2344				rsstable[j + i] = rxo->rss_id;
2345			}
2346		}
2347		rc = be_cmd_rss_config(adapter, rsstable, 128);
2348		if (rc)
2349			return rc;
2350	}
2351
2352	/* First time posting */
2353	for_all_rx_queues(adapter, rxo, i)
2354		be_post_rx_frags(rxo, GFP_KERNEL);
2355	return 0;
2356}
2357
2358static int be_open(struct net_device *netdev)
2359{
2360	struct be_adapter *adapter = netdev_priv(netdev);
2361	struct be_eq_obj *eqo;
2362	struct be_rx_obj *rxo;
2363	struct be_tx_obj *txo;
2364	u8 link_status;
2365	int status, i;
2366
2367	status = be_rx_qs_create(adapter);
2368	if (status)
2369		goto err;
2370
2371	be_irq_register(adapter);
2372
2373	if (!lancer_chip(adapter))
2374		be_intr_set(adapter, true);
2375
2376	for_all_rx_queues(adapter, rxo, i)
2377		be_cq_notify(adapter, rxo->cq.id, true, 0);
2378
2379	for_all_tx_queues(adapter, txo, i)
2380		be_cq_notify(adapter, txo->cq.id, true, 0);
2381
2382	be_async_mcc_enable(adapter);
2383
2384	for_all_evt_queues(adapter, eqo, i) {
2385		napi_enable(&eqo->napi);
2386		be_eq_notify(adapter, eqo->q.id, true, false, 0);
2387	}
2388
2389	status = be_cmd_link_status_query(adapter, NULL, NULL,
2390					  &link_status, 0);
2391	if (!status)
2392		be_link_status_update(adapter, link_status);
2393
2394	return 0;
2395err:
2396	be_close(adapter->netdev);
2397	return -EIO;
2398}
2399
2400static int be_setup_wol(struct be_adapter *adapter, bool enable)
2401{
2402	struct be_dma_mem cmd;
2403	int status = 0;
2404	u8 mac[ETH_ALEN];
2405
2406	memset(mac, 0, ETH_ALEN);
2407
2408	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2409	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2410				    GFP_KERNEL);
2411	if (cmd.va == NULL)
2412		return -1;
2413	memset(cmd.va, 0, cmd.size);
2414
2415	if (enable) {
2416		status = pci_write_config_dword(adapter->pdev,
2417			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2418		if (status) {
2419			dev_err(&adapter->pdev->dev,
2420				"Could not enable Wake-on-lan\n");
2421			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2422					  cmd.dma);
2423			return status;
2424		}
2425		status = be_cmd_enable_magic_wol(adapter,
2426				adapter->netdev->dev_addr, &cmd);
2427		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2428		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2429	} else {
2430		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2431		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2432		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2433	}
2434
2435	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2436	return status;
2437}
2438
2439/*
2440 * Generate a seed MAC address from the PF MAC Address using jhash.
2441 * MAC Address for VFs are assigned incrementally starting from the seed.
2442 * These addresses are programmed in the ASIC by the PF and the VF driver
2443 * queries for the MAC address during its probe.
2444 */
2445static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2446{
2447	u32 vf;
2448	int status = 0;
2449	u8 mac[ETH_ALEN];
2450	struct be_vf_cfg *vf_cfg;
2451
2452	be_vf_eth_addr_generate(adapter, mac);
2453
2454	for_all_vfs(adapter, vf_cfg, vf) {
2455		if (lancer_chip(adapter)) {
2456			status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2457		} else {
2458			status = be_cmd_pmac_add(adapter, mac,
2459						 vf_cfg->if_handle,
2460						 &vf_cfg->pmac_id, vf + 1);
2461		}
2462
2463		if (status)
2464			dev_err(&adapter->pdev->dev,
2465			"Mac address assignment failed for VF %d\n", vf);
2466		else
2467			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2468
2469		mac[5] += 1;
2470	}
2471	return status;
2472}
2473
2474static void be_vf_clear(struct be_adapter *adapter)
2475{
2476	struct be_vf_cfg *vf_cfg;
2477	u32 vf;
2478
2479	for_all_vfs(adapter, vf_cfg, vf) {
2480		if (lancer_chip(adapter))
2481			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2482		else
2483			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2484					vf_cfg->pmac_id, vf + 1);
2485
2486		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2487	}
2488}
2489
2490static int be_clear(struct be_adapter *adapter)
2491{
2492	int i = 1;
2493
2494	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2495		cancel_delayed_work_sync(&adapter->work);
2496		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2497	}
2498
2499	if (sriov_enabled(adapter))
2500		be_vf_clear(adapter);
2501
2502	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2503		be_cmd_pmac_del(adapter, adapter->if_handle,
2504			adapter->pmac_id[i], 0);
2505
2506	be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2507
2508	be_mcc_queues_destroy(adapter);
2509	be_rx_cqs_destroy(adapter);
2510	be_tx_queues_destroy(adapter);
2511	be_evt_queues_destroy(adapter);
2512
2513	/* tell fw we're done with firing cmds */
2514	be_cmd_fw_clean(adapter);
2515
2516	be_msix_disable(adapter);
2517	kfree(adapter->pmac_id);
2518	return 0;
2519}
2520
2521static void be_vf_setup_init(struct be_adapter *adapter)
2522{
2523	struct be_vf_cfg *vf_cfg;
2524	int vf;
2525
2526	for_all_vfs(adapter, vf_cfg, vf) {
2527		vf_cfg->if_handle = -1;
2528		vf_cfg->pmac_id = -1;
2529	}
2530}
2531
2532static int be_vf_setup(struct be_adapter *adapter)
2533{
2534	struct be_vf_cfg *vf_cfg;
2535	u32 cap_flags, en_flags, vf;
2536	u16 def_vlan, lnk_speed;
2537	int status;
2538
2539	be_vf_setup_init(adapter);
2540
2541	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2542				BE_IF_FLAGS_MULTICAST;
2543	for_all_vfs(adapter, vf_cfg, vf) {
2544		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
2545					  &vf_cfg->if_handle, NULL, vf + 1);
2546		if (status)
2547			goto err;
2548	}
2549
2550	status = be_vf_eth_addr_config(adapter);
2551	if (status)
2552		goto err;
2553
2554	for_all_vfs(adapter, vf_cfg, vf) {
2555		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2556						  NULL, vf + 1);
2557		if (status)
2558			goto err;
2559		vf_cfg->tx_rate = lnk_speed * 10;
2560
2561		status = be_cmd_get_hsw_config(adapter, &def_vlan,
2562				vf + 1, vf_cfg->if_handle);
2563		if (status)
2564			goto err;
2565		vf_cfg->def_vid = def_vlan;
2566	}
2567	return 0;
2568err:
2569	return status;
2570}
2571
2572static void be_setup_init(struct be_adapter *adapter)
2573{
2574	adapter->vlan_prio_bmap = 0xff;
2575	adapter->link_speed = -1;
2576	adapter->if_handle = -1;
2577	adapter->be3_native = false;
2578	adapter->promiscuous = false;
2579	adapter->eq_next_idx = 0;
2580}
2581
2582static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
2583{
2584	u32 pmac_id;
2585	int status;
2586	bool pmac_id_active;
2587
2588	status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
2589							&pmac_id, mac);
2590	if (status != 0)
2591		goto do_none;
2592
2593	if (pmac_id_active) {
2594		status = be_cmd_mac_addr_query(adapter, mac,
2595				MAC_ADDRESS_TYPE_NETWORK,
2596				false, adapter->if_handle, pmac_id);
2597
2598		if (!status)
2599			adapter->pmac_id[0] = pmac_id;
2600	} else {
2601		status = be_cmd_pmac_add(adapter, mac,
2602				adapter->if_handle, &adapter->pmac_id[0], 0);
2603	}
2604do_none:
2605	return status;
2606}
2607
2608static int be_setup(struct be_adapter *adapter)
2609{
2610	struct net_device *netdev = adapter->netdev;
2611	u32 cap_flags, en_flags;
2612	u32 tx_fc, rx_fc;
2613	int status;
2614	u8 mac[ETH_ALEN];
2615
2616	be_setup_init(adapter);
2617
2618	be_cmd_req_native_mode(adapter);
2619
2620	be_msix_enable(adapter);
2621
2622	status = be_evt_queues_create(adapter);
2623	if (status)
2624		goto err;
2625
2626	status = be_tx_cqs_create(adapter);
2627	if (status)
2628		goto err;
2629
2630	status = be_rx_cqs_create(adapter);
2631	if (status)
2632		goto err;
2633
2634	status = be_mcc_queues_create(adapter);
2635	if (status)
2636		goto err;
2637
2638	memset(mac, 0, ETH_ALEN);
2639	status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
2640			true /*permanent */, 0, 0);
2641	if (status)
2642		return status;
2643	memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2644	memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2645
2646	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2647			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2648	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2649			BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2650
2651	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2652		cap_flags |= BE_IF_FLAGS_RSS;
2653		en_flags |= BE_IF_FLAGS_RSS;
2654	}
2655	status = be_cmd_if_create(adapter, cap_flags, en_flags,
2656			netdev->dev_addr, &adapter->if_handle,
2657			&adapter->pmac_id[0], 0);
2658	if (status != 0)
2659		goto err;
2660
2661	 /* The VF's permanent mac queried from card is incorrect.
2662	  * For BEx: Query the mac configued by the PF using if_handle
2663	  * For Lancer: Get and use mac_list to obtain mac address.
2664	  */
2665	if (!be_physfn(adapter)) {
2666		if (lancer_chip(adapter))
2667			status = be_add_mac_from_list(adapter, mac);
2668		else
2669			status = be_cmd_mac_addr_query(adapter, mac,
2670					MAC_ADDRESS_TYPE_NETWORK, false,
2671					adapter->if_handle, 0);
2672		if (!status) {
2673			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2674			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2675		}
2676	}
2677
2678	status = be_tx_qs_create(adapter);
2679	if (status)
2680		goto err;
2681
2682	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2683
2684	status = be_vid_config(adapter, false, 0);
2685	if (status)
2686		goto err;
2687
2688	be_set_rx_mode(adapter->netdev);
2689
2690	status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2691	/* For Lancer: It is legal for this cmd to fail on VF */
2692	if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
2693		goto err;
2694
2695	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2696		status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2697					adapter->rx_fc);
2698		/* For Lancer: It is legal for this cmd to fail on VF */
2699		if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
2700			goto err;
2701	}
2702
2703	pcie_set_readrq(adapter->pdev, 4096);
2704
2705	if (sriov_enabled(adapter)) {
2706		status = be_vf_setup(adapter);
2707		if (status)
2708			goto err;
2709	}
2710
2711	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2712	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2713
2714	return 0;
2715err:
2716	be_clear(adapter);
2717	return status;
2718}
2719
2720#ifdef CONFIG_NET_POLL_CONTROLLER
2721static void be_netpoll(struct net_device *netdev)
2722{
2723	struct be_adapter *adapter = netdev_priv(netdev);
2724	struct be_eq_obj *eqo;
2725	int i;
2726
2727	for_all_evt_queues(adapter, eqo, i)
2728		event_handle(eqo);
2729
2730	return;
2731}
2732#endif
2733
2734#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
2735static bool be_flash_redboot(struct be_adapter *adapter,
2736			const u8 *p, u32 img_start, int image_size,
2737			int hdr_size)
2738{
2739	u32 crc_offset;
2740	u8 flashed_crc[4];
2741	int status;
2742
2743	crc_offset = hdr_size + img_start + image_size - 4;
2744
2745	p += crc_offset;
2746
2747	status = be_cmd_get_flash_crc(adapter, flashed_crc,
2748			(image_size - 4));
2749	if (status) {
2750		dev_err(&adapter->pdev->dev,
2751		"could not get crc from flash, not flashing redboot\n");
2752		return false;
2753	}
2754
2755	/*update redboot only if crc does not match*/
2756	if (!memcmp(flashed_crc, p, 4))
2757		return false;
2758	else
2759		return true;
2760}
2761
2762static bool phy_flashing_required(struct be_adapter *adapter)
2763{
2764	int status = 0;
2765	struct be_phy_info phy_info;
2766
2767	status = be_cmd_get_phy_info(adapter, &phy_info);
2768	if (status)
2769		return false;
2770	if ((phy_info.phy_type == TN_8022) &&
2771		(phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2772		return true;
2773	}
2774	return false;
2775}
2776
2777static int be_flash_data(struct be_adapter *adapter,
2778			const struct firmware *fw,
2779			struct be_dma_mem *flash_cmd, int num_of_images)
2780
2781{
2782	int status = 0, i, filehdr_size = 0;
2783	u32 total_bytes = 0, flash_op;
2784	int num_bytes;
2785	const u8 *p = fw->data;
2786	struct be_cmd_write_flashrom *req = flash_cmd->va;
2787	const struct flash_comp *pflashcomp;
2788	int num_comp;
2789
2790	static const struct flash_comp gen3_flash_types[10] = {
2791		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2792			FLASH_IMAGE_MAX_SIZE_g3},
2793		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2794			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2795		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2796			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2797		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2798			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2799		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2800			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2801		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2802			FLASH_IMAGE_MAX_SIZE_g3},
2803		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2804			FLASH_IMAGE_MAX_SIZE_g3},
2805		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2806			FLASH_IMAGE_MAX_SIZE_g3},
2807		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2808			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2809		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2810			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
2811	};
2812	static const struct flash_comp gen2_flash_types[8] = {
2813		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2814			FLASH_IMAGE_MAX_SIZE_g2},
2815		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2816			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2817		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2818			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2819		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2820			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2821		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2822			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2823		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2824			FLASH_IMAGE_MAX_SIZE_g2},
2825		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2826			FLASH_IMAGE_MAX_SIZE_g2},
2827		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2828			 FLASH_IMAGE_MAX_SIZE_g2}
2829	};
2830
2831	if (adapter->generation == BE_GEN3) {
2832		pflashcomp = gen3_flash_types;
2833		filehdr_size = sizeof(struct flash_file_hdr_g3);
2834		num_comp = ARRAY_SIZE(gen3_flash_types);
2835	} else {
2836		pflashcomp = gen2_flash_types;
2837		filehdr_size = sizeof(struct flash_file_hdr_g2);
2838		num_comp = ARRAY_SIZE(gen2_flash_types);
2839	}
2840	for (i = 0; i < num_comp; i++) {
2841		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2842				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2843			continue;
2844		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2845			if (!phy_flashing_required(adapter))
2846				continue;
2847		}
2848		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2849			(!be_flash_redboot(adapter, fw->data,
2850			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2851			(num_of_images * sizeof(struct image_hdr)))))
2852			continue;
2853		p = fw->data;
2854		p += filehdr_size + pflashcomp[i].offset
2855			+ (num_of_images * sizeof(struct image_hdr));
2856		if (p + pflashcomp[i].size > fw->data + fw->size)
2857			return -1;
2858		total_bytes = pflashcomp[i].size;
2859		while (total_bytes) {
2860			if (total_bytes > 32*1024)
2861				num_bytes = 32*1024;
2862			else
2863				num_bytes = total_bytes;
2864			total_bytes -= num_bytes;
2865			if (!total_bytes) {
2866				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2867					flash_op = FLASHROM_OPER_PHY_FLASH;
2868				else
2869					flash_op = FLASHROM_OPER_FLASH;
2870			} else {
2871				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2872					flash_op = FLASHROM_OPER_PHY_SAVE;
2873				else
2874					flash_op = FLASHROM_OPER_SAVE;
2875			}
2876			memcpy(req->params.data_buf, p, num_bytes);
2877			p += num_bytes;
2878			status = be_cmd_write_flashrom(adapter, flash_cmd,
2879				pflashcomp[i].optype, flash_op, num_bytes);
2880			if (status) {
2881				if ((status == ILLEGAL_IOCTL_REQ) &&
2882					(pflashcomp[i].optype ==
2883						IMG_TYPE_PHY_FW))
2884					break;
2885				dev_err(&adapter->pdev->dev,
2886					"cmd to write to flash rom failed.\n");
2887				return -1;
2888			}
2889		}
2890	}
2891	return 0;
2892}
2893
2894static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2895{
2896	if (fhdr == NULL)
2897		return 0;
2898	if (fhdr->build[0] == '3')
2899		return BE_GEN3;
2900	else if (fhdr->build[0] == '2')
2901		return BE_GEN2;
2902	else
2903		return 0;
2904}
2905
2906static int lancer_fw_download(struct be_adapter *adapter,
2907				const struct firmware *fw)
2908{
2909#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
2910#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
2911	struct be_dma_mem flash_cmd;
2912	const u8 *data_ptr = NULL;
2913	u8 *dest_image_ptr = NULL;
2914	size_t image_size = 0;
2915	u32 chunk_size = 0;
2916	u32 data_written = 0;
2917	u32 offset = 0;
2918	int status = 0;
2919	u8 add_status = 0;
2920
2921	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2922		dev_err(&adapter->pdev->dev,
2923			"FW Image not properly aligned. "
2924			"Length must be 4 byte aligned.\n");
2925		status = -EINVAL;
2926		goto lancer_fw_exit;
2927	}
2928
2929	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2930				+ LANCER_FW_DOWNLOAD_CHUNK;
2931	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2932						&flash_cmd.dma, GFP_KERNEL);
2933	if (!flash_cmd.va) {
2934		status = -ENOMEM;
2935		dev_err(&adapter->pdev->dev,
2936			"Memory allocation failure while flashing\n");
2937		goto lancer_fw_exit;
2938	}
2939
2940	dest_image_ptr = flash_cmd.va +
2941				sizeof(struct lancer_cmd_req_write_object);
2942	image_size = fw->size;
2943	data_ptr = fw->data;
2944
2945	while (image_size) {
2946		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2947
2948		/* Copy the image chunk content. */
2949		memcpy(dest_image_ptr, data_ptr, chunk_size);
2950
2951		status = lancer_cmd_write_object(adapter, &flash_cmd,
2952				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2953				&data_written, &add_status);
2954
2955		if (status)
2956			break;
2957
2958		offset += data_written;
2959		data_ptr += data_written;
2960		image_size -= data_written;
2961	}
2962
2963	if (!status) {
2964		/* Commit the FW written */
2965		status = lancer_cmd_write_object(adapter, &flash_cmd,
2966					0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2967					&data_written, &add_status);
2968	}
2969
2970	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2971				flash_cmd.dma);
2972	if (status) {
2973		dev_err(&adapter->pdev->dev,
2974			"Firmware load error. "
2975			"Status code: 0x%x Additional Status: 0x%x\n",
2976			status, add_status);
2977		goto lancer_fw_exit;
2978	}
2979
2980	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2981lancer_fw_exit:
2982	return status;
2983}
2984
2985static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
2986{
2987	struct flash_file_hdr_g2 *fhdr;
2988	struct flash_file_hdr_g3 *fhdr3;
2989	struct image_hdr *img_hdr_ptr = NULL;
2990	struct be_dma_mem flash_cmd;
2991	const u8 *p;
2992	int status = 0, i = 0, num_imgs = 0;
2993
2994	p = fw->data;
2995	fhdr = (struct flash_file_hdr_g2 *) p;
2996
2997	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2998	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2999					  &flash_cmd.dma, GFP_KERNEL);
3000	if (!flash_cmd.va) {
3001		status = -ENOMEM;
3002		dev_err(&adapter->pdev->dev,
3003			"Memory allocation failure while flashing\n");
3004		goto be_fw_exit;
3005	}
3006
3007	if ((adapter->generation == BE_GEN3) &&
3008			(get_ufigen_type(fhdr) == BE_GEN3)) {
3009		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
3010		num_imgs = le32_to_cpu(fhdr3->num_imgs);
3011		for (i = 0; i < num_imgs; i++) {
3012			img_hdr_ptr = (struct image_hdr *) (fw->data +
3013					(sizeof(struct flash_file_hdr_g3) +
3014					 i * sizeof(struct image_hdr)));
3015			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
3016				status = be_flash_data(adapter, fw, &flash_cmd,
3017							num_imgs);
3018		}
3019	} else if ((adapter->generation == BE_GEN2) &&
3020			(get_ufigen_type(fhdr) == BE_GEN2)) {
3021		status = be_flash_data(adapter, fw, &flash_cmd, 0);
3022	} else {
3023		dev_err(&adapter->pdev->dev,
3024			"UFI and Interface are not compatible for flashing\n");
3025		status = -1;
3026	}
3027
3028	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3029			  flash_cmd.dma);
3030	if (status) {
3031		dev_err(&adapter->pdev->dev, "Firmware load error\n");
3032		goto be_fw_exit;
3033	}
3034
3035	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3036
3037be_fw_exit:
3038	return status;
3039}
3040
3041int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3042{
3043	const struct firmware *fw;
3044	int status;
3045
3046	if (!netif_running(adapter->netdev)) {
3047		dev_err(&adapter->pdev->dev,
3048			"Firmware load not allowed (interface is down)\n");
3049		return -1;
3050	}
3051
3052	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3053	if (status)
3054		goto fw_exit;
3055
3056	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3057
3058	if (lancer_chip(adapter))
3059		status = lancer_fw_download(adapter, fw);
3060	else
3061		status = be_fw_download(adapter, fw);
3062
3063fw_exit:
3064	release_firmware(fw);
3065	return status;
3066}
3067
3068static const struct net_device_ops be_netdev_ops = {
3069	.ndo_open		= be_open,
3070	.ndo_stop		= be_close,
3071	.ndo_start_xmit		= be_xmit,
3072	.ndo_set_rx_mode	= be_set_rx_mode,
3073	.ndo_set_mac_address	= be_mac_addr_set,
3074	.ndo_change_mtu		= be_change_mtu,
3075	.ndo_get_stats64	= be_get_stats64,
3076	.ndo_validate_addr	= eth_validate_addr,
3077	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
3078	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
3079	.ndo_set_vf_mac		= be_set_vf_mac,
3080	.ndo_set_vf_vlan	= be_set_vf_vlan,
3081	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
3082	.ndo_get_vf_config	= be_get_vf_config,
3083#ifdef CONFIG_NET_POLL_CONTROLLER
3084	.ndo_poll_controller	= be_netpoll,
3085#endif
3086};
3087
3088static void be_netdev_init(struct net_device *netdev)
3089{
3090	struct be_adapter *adapter = netdev_priv(netdev);
3091	struct be_eq_obj *eqo;
3092	int i;
3093
3094	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3095		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3096		NETIF_F_HW_VLAN_TX;
3097	if (be_multi_rxq(adapter))
3098		netdev->hw_features |= NETIF_F_RXHASH;
3099
3100	netdev->features |= netdev->hw_features |
3101		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3102
3103	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3104		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3105
3106	netdev->priv_flags |= IFF_UNICAST_FLT;
3107
3108	netdev->flags |= IFF_MULTICAST;
3109
3110	netif_set_gso_max_size(netdev, 65535);
3111
3112	netdev->netdev_ops = &be_netdev_ops;
3113
3114	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3115
3116	for_all_evt_queues(adapter, eqo, i)
3117		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3118}
3119
3120static void be_unmap_pci_bars(struct be_adapter *adapter)
3121{
3122	if (adapter->csr)
3123		iounmap(adapter->csr);
3124	if (adapter->db)
3125		iounmap(adapter->db);
3126}
3127
3128static int be_map_pci_bars(struct be_adapter *adapter)
3129{
3130	u8 __iomem *addr;
3131	int db_reg;
3132
3133	if (lancer_chip(adapter)) {
3134		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3135			pci_resource_len(adapter->pdev, 0));
3136		if (addr == NULL)
3137			return -ENOMEM;
3138		adapter->db = addr;
3139		return 0;
3140	}
3141
3142	if (be_physfn(adapter)) {
3143		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3144				pci_resource_len(adapter->pdev, 2));
3145		if (addr == NULL)
3146			return -ENOMEM;
3147		adapter->csr = addr;
3148	}
3149
3150	if (adapter->generation == BE_GEN2) {
3151		db_reg = 4;
3152	} else {
3153		if (be_physfn(adapter))
3154			db_reg = 4;
3155		else
3156			db_reg = 0;
3157	}
3158	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3159				pci_resource_len(adapter->pdev, db_reg));
3160	if (addr == NULL)
3161		goto pci_map_err;
3162	adapter->db = addr;
3163
3164	return 0;
3165pci_map_err:
3166	be_unmap_pci_bars(adapter);
3167	return -ENOMEM;
3168}
3169
3170
3171static void be_ctrl_cleanup(struct be_adapter *adapter)
3172{
3173	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3174
3175	be_unmap_pci_bars(adapter);
3176
3177	if (mem->va)
3178		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3179				  mem->dma);
3180
3181	mem = &adapter->rx_filter;
3182	if (mem->va)
3183		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3184				  mem->dma);
3185}
3186
3187static int be_ctrl_init(struct be_adapter *adapter)
3188{
3189	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3190	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3191	struct be_dma_mem *rx_filter = &adapter->rx_filter;
3192	int status;
3193
3194	status = be_map_pci_bars(adapter);
3195	if (status)
3196		goto done;
3197
3198	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3199	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3200						mbox_mem_alloc->size,
3201						&mbox_mem_alloc->dma,
3202						GFP_KERNEL);
3203	if (!mbox_mem_alloc->va) {
3204		status = -ENOMEM;
3205		goto unmap_pci_bars;
3206	}
3207	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3208	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3209	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3210	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3211
3212	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3213	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3214					&rx_filter->dma, GFP_KERNEL);
3215	if (rx_filter->va == NULL) {
3216		status = -ENOMEM;
3217		goto free_mbox;
3218	}
3219	memset(rx_filter->va, 0, rx_filter->size);
3220
3221	mutex_init(&adapter->mbox_lock);
3222	spin_lock_init(&adapter->mcc_lock);
3223	spin_lock_init(&adapter->mcc_cq_lock);
3224
3225	init_completion(&adapter->flash_compl);
3226	pci_save_state(adapter->pdev);
3227	return 0;
3228
3229free_mbox:
3230	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3231			  mbox_mem_alloc->va, mbox_mem_alloc->dma);
3232
3233unmap_pci_bars:
3234	be_unmap_pci_bars(adapter);
3235
3236done:
3237	return status;
3238}
3239
3240static void be_stats_cleanup(struct be_adapter *adapter)
3241{
3242	struct be_dma_mem *cmd = &adapter->stats_cmd;
3243
3244	if (cmd->va)
3245		dma_free_coherent(&adapter->pdev->dev, cmd->size,
3246				  cmd->va, cmd->dma);
3247}
3248
3249static int be_stats_init(struct be_adapter *adapter)
3250{
3251	struct be_dma_mem *cmd = &adapter->stats_cmd;
3252
3253	if (adapter->generation == BE_GEN2) {
3254		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3255	} else {
3256		if (lancer_chip(adapter))
3257			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3258		else
3259			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3260	}
3261	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3262				     GFP_KERNEL);
3263	if (cmd->va == NULL)
3264		return -1;
3265	memset(cmd->va, 0, cmd->size);
3266	return 0;
3267}
3268
3269static void __devexit be_remove(struct pci_dev *pdev)
3270{
3271	struct be_adapter *adapter = pci_get_drvdata(pdev);
3272
3273	if (!adapter)
3274		return;
3275
3276	unregister_netdev(adapter->netdev);
3277
3278	be_clear(adapter);
3279
3280	be_stats_cleanup(adapter);
3281
3282	be_ctrl_cleanup(adapter);
3283
3284	be_sriov_disable(adapter);
3285
3286	pci_set_drvdata(pdev, NULL);
3287	pci_release_regions(pdev);
3288	pci_disable_device(pdev);
3289
3290	free_netdev(adapter->netdev);
3291}
3292
3293bool be_is_wol_supported(struct be_adapter *adapter)
3294{
3295	return ((adapter->wol_cap & BE_WOL_CAP) &&
3296		!be_is_wol_excluded(adapter)) ? true : false;
3297}
3298
3299static int be_get_config(struct be_adapter *adapter)
3300{
3301	int status;
3302
3303	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3304			&adapter->function_mode, &adapter->function_caps);
3305	if (status)
3306		return status;
3307
3308	if (adapter->function_mode & FLEX10_MODE)
3309		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3310	else
3311		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3312
3313	if (be_physfn(adapter))
3314		adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3315	else
3316		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3317
3318	/* primary mac needs 1 pmac entry */
3319	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3320				  sizeof(u32), GFP_KERNEL);
3321	if (!adapter->pmac_id)
3322		return -ENOMEM;
3323
3324	status = be_cmd_get_cntl_attributes(adapter);
3325	if (status)
3326		return status;
3327
3328	status = be_cmd_get_acpi_wol_cap(adapter);
3329	if (status) {
3330		/* in case of a failure to get wol capabillities
3331		 * check the exclusion list to determine WOL capability */
3332		if (!be_is_wol_excluded(adapter))
3333			adapter->wol_cap |= BE_WOL_CAP;
3334	}
3335
3336	if (be_is_wol_supported(adapter))
3337		adapter->wol = true;
3338
3339	return 0;
3340}
3341
3342static int be_dev_family_check(struct be_adapter *adapter)
3343{
3344	struct pci_dev *pdev = adapter->pdev;
3345	u32 sli_intf = 0, if_type;
3346
3347	switch (pdev->device) {
3348	case BE_DEVICE_ID1:
3349	case OC_DEVICE_ID1:
3350		adapter->generation = BE_GEN2;
3351		break;
3352	case BE_DEVICE_ID2:
3353	case OC_DEVICE_ID2:
3354	case OC_DEVICE_ID5:
3355		adapter->generation = BE_GEN3;
3356		break;
3357	case OC_DEVICE_ID3:
3358	case OC_DEVICE_ID4:
3359		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3360		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3361						SLI_INTF_IF_TYPE_SHIFT;
3362
3363		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3364			if_type != 0x02) {
3365			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3366			return -EINVAL;
3367		}
3368		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3369					 SLI_INTF_FAMILY_SHIFT);
3370		adapter->generation = BE_GEN3;
3371		break;
3372	default:
3373		adapter->generation = 0;
3374	}
3375	return 0;
3376}
3377
3378static int lancer_wait_ready(struct be_adapter *adapter)
3379{
3380#define SLIPORT_READY_TIMEOUT 30
3381	u32 sliport_status;
3382	int status = 0, i;
3383
3384	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3385		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3386		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3387			break;
3388
3389		msleep(1000);
3390	}
3391
3392	if (i == SLIPORT_READY_TIMEOUT)
3393		status = -1;
3394
3395	return status;
3396}
3397
3398static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3399{
3400	int status;
3401	u32 sliport_status, err, reset_needed;
3402	status = lancer_wait_ready(adapter);
3403	if (!status) {
3404		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3405		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3406		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3407		if (err && reset_needed) {
3408			iowrite32(SLI_PORT_CONTROL_IP_MASK,
3409					adapter->db + SLIPORT_CONTROL_OFFSET);
3410
3411			/* check adapter has corrected the error */
3412			status = lancer_wait_ready(adapter);
3413			sliport_status = ioread32(adapter->db +
3414							SLIPORT_STATUS_OFFSET);
3415			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3416						SLIPORT_STATUS_RN_MASK);
3417			if (status || sliport_status)
3418				status = -1;
3419		} else if (err || reset_needed) {
3420			status = -1;
3421		}
3422	}
3423	return status;
3424}
3425
3426static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
3427{
3428	int status;
3429	u32 sliport_status;
3430
3431	if (adapter->eeh_err || adapter->ue_detected)
3432		return;
3433
3434	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3435
3436	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3437		dev_err(&adapter->pdev->dev,
3438				"Adapter in error state."
3439				"Trying to recover.\n");
3440
3441		status = lancer_test_and_set_rdy_state(adapter);
3442		if (status)
3443			goto err;
3444
3445		netif_device_detach(adapter->netdev);
3446
3447		if (netif_running(adapter->netdev))
3448			be_close(adapter->netdev);
3449
3450		be_clear(adapter);
3451
3452		adapter->fw_timeout = false;
3453
3454		status = be_setup(adapter);
3455		if (status)
3456			goto err;
3457
3458		if (netif_running(adapter->netdev)) {
3459			status = be_open(adapter->netdev);
3460			if (status)
3461				goto err;
3462		}
3463
3464		netif_device_attach(adapter->netdev);
3465
3466		dev_err(&adapter->pdev->dev,
3467				"Adapter error recovery succeeded\n");
3468	}
3469	return;
3470err:
3471	dev_err(&adapter->pdev->dev,
3472			"Adapter error recovery failed\n");
3473}
3474
3475static void be_worker(struct work_struct *work)
3476{
3477	struct be_adapter *adapter =
3478		container_of(work, struct be_adapter, work.work);
3479	struct be_rx_obj *rxo;
3480	struct be_eq_obj *eqo;
3481	int i;
3482
3483	if (lancer_chip(adapter))
3484		lancer_test_and_recover_fn_err(adapter);
3485
3486	be_detect_dump_ue(adapter);
3487
3488	/* when interrupts are not yet enabled, just reap any pending
3489	* mcc completions */
3490	if (!netif_running(adapter->netdev)) {
3491		be_process_mcc(adapter);
3492		goto reschedule;
3493	}
3494
3495	if (!adapter->stats_cmd_sent) {
3496		if (lancer_chip(adapter))
3497			lancer_cmd_get_pport_stats(adapter,
3498						&adapter->stats_cmd);
3499		else
3500			be_cmd_get_stats(adapter, &adapter->stats_cmd);
3501	}
3502
3503	for_all_rx_queues(adapter, rxo, i) {
3504		if (rxo->rx_post_starved) {
3505			rxo->rx_post_starved = false;
3506			be_post_rx_frags(rxo, GFP_KERNEL);
3507		}
3508	}
3509
3510	for_all_evt_queues(adapter, eqo, i)
3511		be_eqd_update(adapter, eqo);
3512
3513reschedule:
3514	adapter->work_counter++;
3515	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3516}
3517
3518static int __devinit be_probe(struct pci_dev *pdev,
3519			const struct pci_device_id *pdev_id)
3520{
3521	int status = 0;
3522	struct be_adapter *adapter;
3523	struct net_device *netdev;
3524
3525	status = pci_enable_device(pdev);
3526	if (status)
3527		goto do_none;
3528
3529	status = pci_request_regions(pdev, DRV_NAME);
3530	if (status)
3531		goto disable_dev;
3532	pci_set_master(pdev);
3533
3534	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3535	if (netdev == NULL) {
3536		status = -ENOMEM;
3537		goto rel_reg;
3538	}
3539	adapter = netdev_priv(netdev);
3540	adapter->pdev = pdev;
3541	pci_set_drvdata(pdev, adapter);
3542
3543	status = be_dev_family_check(adapter);
3544	if (status)
3545		goto free_netdev;
3546
3547	adapter->netdev = netdev;
3548	SET_NETDEV_DEV(netdev, &pdev->dev);
3549
3550	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3551	if (!status) {
3552		netdev->features |= NETIF_F_HIGHDMA;
3553	} else {
3554		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3555		if (status) {
3556			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3557			goto free_netdev;
3558		}
3559	}
3560
3561	status = be_sriov_enable(adapter);
3562	if (status)
3563		goto free_netdev;
3564
3565	status = be_ctrl_init(adapter);
3566	if (status)
3567		goto disable_sriov;
3568
3569	if (lancer_chip(adapter)) {
3570		status = lancer_wait_ready(adapter);
3571		if (!status) {
3572			iowrite32(SLI_PORT_CONTROL_IP_MASK,
3573					adapter->db + SLIPORT_CONTROL_OFFSET);
3574			status = lancer_test_and_set_rdy_state(adapter);
3575		}
3576		if (status) {
3577			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
3578			goto ctrl_clean;
3579		}
3580	}
3581
3582	/* sync up with fw's ready state */
3583	if (be_physfn(adapter)) {
3584		status = be_cmd_POST(adapter);
3585		if (status)
3586			goto ctrl_clean;
3587	}
3588
3589	/* tell fw we're ready to fire cmds */
3590	status = be_cmd_fw_init(adapter);
3591	if (status)
3592		goto ctrl_clean;
3593
3594	status = be_cmd_reset_function(adapter);
3595	if (status)
3596		goto ctrl_clean;
3597
3598	/* The INTR bit may be set in the card when probed by a kdump kernel
3599	 * after a crash.
3600	 */
3601	if (!lancer_chip(adapter))
3602		be_intr_set(adapter, false);
3603
3604	status = be_stats_init(adapter);
3605	if (status)
3606		goto ctrl_clean;
3607
3608	status = be_get_config(adapter);
3609	if (status)
3610		goto stats_clean;
3611
3612	INIT_DELAYED_WORK(&adapter->work, be_worker);
3613	adapter->rx_fc = adapter->tx_fc = true;
3614
3615	status = be_setup(adapter);
3616	if (status)
3617		goto msix_disable;
3618
3619	be_netdev_init(netdev);
3620	status = register_netdev(netdev);
3621	if (status != 0)
3622		goto unsetup;
3623
3624	dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
3625		adapter->port_num);
3626
3627	return 0;
3628
3629unsetup:
3630	be_clear(adapter);
3631msix_disable:
3632	be_msix_disable(adapter);
3633stats_clean:
3634	be_stats_cleanup(adapter);
3635ctrl_clean:
3636	be_ctrl_cleanup(adapter);
3637disable_sriov:
3638	be_sriov_disable(adapter);
3639free_netdev:
3640	free_netdev(netdev);
3641	pci_set_drvdata(pdev, NULL);
3642rel_reg:
3643	pci_release_regions(pdev);
3644disable_dev:
3645	pci_disable_device(pdev);
3646do_none:
3647	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3648	return status;
3649}
3650
3651static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3652{
3653	struct be_adapter *adapter = pci_get_drvdata(pdev);
3654	struct net_device *netdev =  adapter->netdev;
3655
3656	if (adapter->wol)
3657		be_setup_wol(adapter, true);
3658
3659	netif_device_detach(netdev);
3660	if (netif_running(netdev)) {
3661		rtnl_lock();
3662		be_close(netdev);
3663		rtnl_unlock();
3664	}
3665	be_clear(adapter);
3666
3667	pci_save_state(pdev);
3668	pci_disable_device(pdev);
3669	pci_set_power_state(pdev, pci_choose_state(pdev, state));
3670	return 0;
3671}
3672
3673static int be_resume(struct pci_dev *pdev)
3674{
3675	int status = 0;
3676	struct be_adapter *adapter = pci_get_drvdata(pdev);
3677	struct net_device *netdev =  adapter->netdev;
3678
3679	netif_device_detach(netdev);
3680
3681	status = pci_enable_device(pdev);
3682	if (status)
3683		return status;
3684
3685	pci_set_power_state(pdev, 0);
3686	pci_restore_state(pdev);
3687
3688	/* tell fw we're ready to fire cmds */
3689	status = be_cmd_fw_init(adapter);
3690	if (status)
3691		return status;
3692
3693	be_setup(adapter);
3694	if (netif_running(netdev)) {
3695		rtnl_lock();
3696		be_open(netdev);
3697		rtnl_unlock();
3698	}
3699	netif_device_attach(netdev);
3700
3701	if (adapter->wol)
3702		be_setup_wol(adapter, false);
3703
3704	return 0;
3705}
3706
3707/*
3708 * An FLR will stop BE from DMAing any data.
3709 */
3710static void be_shutdown(struct pci_dev *pdev)
3711{
3712	struct be_adapter *adapter = pci_get_drvdata(pdev);
3713
3714	if (!adapter)
3715		return;
3716
3717	cancel_delayed_work_sync(&adapter->work);
3718
3719	netif_device_detach(adapter->netdev);
3720
3721	if (adapter->wol)
3722		be_setup_wol(adapter, true);
3723
3724	be_cmd_reset_function(adapter);
3725
3726	pci_disable_device(pdev);
3727}
3728
3729static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3730				pci_channel_state_t state)
3731{
3732	struct be_adapter *adapter = pci_get_drvdata(pdev);
3733	struct net_device *netdev =  adapter->netdev;
3734
3735	dev_err(&adapter->pdev->dev, "EEH error detected\n");
3736
3737	adapter->eeh_err = true;
3738
3739	netif_device_detach(netdev);
3740
3741	if (netif_running(netdev)) {
3742		rtnl_lock();
3743		be_close(netdev);
3744		rtnl_unlock();
3745	}
3746	be_clear(adapter);
3747
3748	if (state == pci_channel_io_perm_failure)
3749		return PCI_ERS_RESULT_DISCONNECT;
3750
3751	pci_disable_device(pdev);
3752
3753	return PCI_ERS_RESULT_NEED_RESET;
3754}
3755
3756static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3757{
3758	struct be_adapter *adapter = pci_get_drvdata(pdev);
3759	int status;
3760
3761	dev_info(&adapter->pdev->dev, "EEH reset\n");
3762	adapter->eeh_err = false;
3763	adapter->ue_detected = false;
3764	adapter->fw_timeout = false;
3765
3766	status = pci_enable_device(pdev);
3767	if (status)
3768		return PCI_ERS_RESULT_DISCONNECT;
3769
3770	pci_set_master(pdev);
3771	pci_set_power_state(pdev, 0);
3772	pci_restore_state(pdev);
3773
3774	/* Check if card is ok and fw is ready */
3775	status = be_cmd_POST(adapter);
3776	if (status)
3777		return PCI_ERS_RESULT_DISCONNECT;
3778
3779	return PCI_ERS_RESULT_RECOVERED;
3780}
3781
3782static void be_eeh_resume(struct pci_dev *pdev)
3783{
3784	int status = 0;
3785	struct be_adapter *adapter = pci_get_drvdata(pdev);
3786	struct net_device *netdev =  adapter->netdev;
3787
3788	dev_info(&adapter->pdev->dev, "EEH resume\n");
3789
3790	pci_save_state(pdev);
3791
3792	/* tell fw we're ready to fire cmds */
3793	status = be_cmd_fw_init(adapter);
3794	if (status)
3795		goto err;
3796
3797	status = be_setup(adapter);
3798	if (status)
3799		goto err;
3800
3801	if (netif_running(netdev)) {
3802		status = be_open(netdev);
3803		if (status)
3804			goto err;
3805	}
3806	netif_device_attach(netdev);
3807	return;
3808err:
3809	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3810}
3811
3812static struct pci_error_handlers be_eeh_handlers = {
3813	.error_detected = be_eeh_err_detected,
3814	.slot_reset = be_eeh_reset,
3815	.resume = be_eeh_resume,
3816};
3817
3818static struct pci_driver be_driver = {
3819	.name = DRV_NAME,
3820	.id_table = be_dev_ids,
3821	.probe = be_probe,
3822	.remove = be_remove,
3823	.suspend = be_suspend,
3824	.resume = be_resume,
3825	.shutdown = be_shutdown,
3826	.err_handler = &be_eeh_handlers
3827};
3828
3829static int __init be_init_module(void)
3830{
3831	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3832	    rx_frag_size != 2048) {
3833		printk(KERN_WARNING DRV_NAME
3834			" : Module param rx_frag_size must be 2048/4096/8192."
3835			" Using 2048\n");
3836		rx_frag_size = 2048;
3837	}
3838
3839	return pci_register_driver(&be_driver);
3840}
3841module_init(be_init_module);
3842
3843static void __exit be_exit_module(void)
3844{
3845	pci_unregister_driver(&be_driver);
3846}
3847module_exit(be_exit_module);
3848