/* igb.h revision 5536d2102a2d37a02e2c233ead4e1e4cabbdcd5b */
1/*******************************************************************************
2
3  Intel(R) Gigabit Ethernet Linux driver
4  Copyright(c) 2007-2012 Intel Corporation.
5
6  This program is free software; you can redistribute it and/or modify it
7  under the terms and conditions of the GNU General Public License,
8  version 2, as published by the Free Software Foundation.
9
10  This program is distributed in the hope it will be useful, but WITHOUT
11  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  more details.
14
15  You should have received a copy of the GNU General Public License along with
16  this program; if not, write to the Free Software Foundation, Inc.,
17  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19  The full GNU General Public License is included in this distribution in
20  the file called "COPYING".
21
22  Contact Information:
23  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28
29/* Linux PRO/1000 Ethernet Driver main header file */
30
31#ifndef _IGB_H_
32#define _IGB_H_
33
34#include "e1000_mac.h"
35#include "e1000_82575.h"
36
37#ifdef CONFIG_IGB_PTP
38#include <linux/clocksource.h>
39#include <linux/net_tstamp.h>
40#include <linux/ptp_clock_kernel.h>
41#endif /* CONFIG_IGB_PTP */
42#include <linux/bitops.h>
43#include <linux/if_vlan.h>
44
45struct igb_adapter;
46
/* Interrupt defines */
#define IGB_START_ITR                    648 /* ~6000 ints/sec */
/* ITR register values; the names give the approximate resulting interrupt
 * rate (e.g. IGB_4K_ITR -> ~4000 ints/sec) -- TODO confirm exact unit */
#define IGB_4K_ITR                       980
#define IGB_20K_ITR                      196
#define IGB_70K_ITR                       56

/* TX/RX descriptor defines */
#define IGB_DEFAULT_TXD                  256
#define IGB_DEFAULT_TX_WORK		 128 /* default Tx clean budget per poll */
#define IGB_MIN_TXD                       80
#define IGB_MAX_TXD                     4096

#define IGB_DEFAULT_RXD                  256
#define IGB_MIN_RXD                       80
#define IGB_MAX_RXD                     4096

#define IGB_DEFAULT_ITR                    3 /* dynamic */
#define IGB_MAX_ITR_USECS              10000
#define IGB_MIN_ITR_USECS                 10
#define NON_Q_VECTORS                      1
#define MAX_Q_VECTORS                      8 /* sizes igb_adapter.q_vector[] */
68
/* Transmit and receive queues */
#define IGB_MAX_RX_QUEUES                  8
#define IGB_MAX_RX_QUEUES_82575            4 /* smaller limits on older parts */
#define IGB_MAX_RX_QUEUES_I211             2
#define IGB_MAX_TX_QUEUES                  8
#define IGB_MAX_VF_MC_ENTRIES              30 /* sizes vf_data_storage.vf_mc_hashes[] */
#define IGB_MAX_VF_FUNCTIONS               8
#define IGB_MAX_VFTA_ENTRIES               128
#define IGB_82576_VF_DEV_ID                0x10CA
#define IGB_I350_VF_DEV_ID                 0x1520

/* NVM version defines: masks/shifts for decoding the firmware version
 * words read from the NVM (see igb_set_fw_version) */
#define IGB_MAJOR_MASK			0xF000
#define IGB_MINOR_MASK			0x0FF0
#define IGB_BUILD_MASK			0x000F
#define IGB_COMB_VER_MASK		0x00FF
#define IGB_MAJOR_SHIFT			12
#define IGB_MINOR_SHIFT			4
#define IGB_COMB_VER_SHFT		8
#define IGB_NVM_VER_INVALID		0xFFFF
#define IGB_ETRACK_SHIFT		16
#define NVM_ETRACK_WORD			0x0042
#define NVM_COMB_VER_OFF		0x0083
#define NVM_COMB_VER_PTR		0x003d
93
/* Per-VF state tracked by the PF; adapter->vf_data holds one entry per
 * allocated SR-IOV virtual function. */
struct vf_data_storage {
	unsigned char vf_mac_addresses[ETH_ALEN];
	u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];	/* multicast hash filters */
	u16 num_vf_mc_hashes;		/* number of valid entries in vf_mc_hashes[] */
	u16 vlans_enabled;
	u32 flags;			/* IGB_VF_FLAG_* bits below */
	unsigned long last_nack;	/* presumably jiffies of last NACK to this VF -- verify */
	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
	u16 pf_qos;
	u16 tx_rate;
};

#define IGB_VF_FLAG_CTS            0x00000001 /* VF is clear to send data */
#define IGB_VF_FLAG_UNI_PROMISC    0x00000002 /* VF has unicast promisc */
#define IGB_VF_FLAG_MULTI_PROMISC  0x00000004 /* VF has multicast promisc */
#define IGB_VF_FLAG_PF_SET_MAC     0x00000008 /* PF has set MAC address */
110
/* RX descriptor control thresholds.
 * PTHRESH - MAC will consider prefetch if it has fewer than this number of
 *           descriptors available in its onboard memory.
 *           Setting this to 0 disables RX descriptor prefetch.
 * HTHRESH - MAC will only prefetch if there are at least this many descriptors
 *           available in host memory.
 *           If PTHRESH is 0, this should also be 0.
 * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
 *           descriptors until either it has this many to write back, or the
 *           ITR timer expires.
 */
#define IGB_RX_PTHRESH                     8
#define IGB_RX_HTHRESH                     8
#define IGB_TX_PTHRESH                     8
#define IGB_TX_HTHRESH                     1
/* NOTE: the two WTHRESH macros expand references to local variables named
 * 'hw' and 'adapter' at the use site; they are only valid in functions that
 * have both in scope. 82576 with MSI-X uses a smaller writeback threshold. */
#define IGB_RX_WTHRESH                     ((hw->mac.type == e1000_82576 && \
					     adapter->msix_entries) ? 1 : 4)
#define IGB_TX_WTHRESH                     ((hw->mac.type == e1000_82576 && \
					     adapter->msix_entries) ? 1 : 16)

/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
133
/* Supported Rx Buffer Sizes */
#define IGB_RXBUFFER_256	256
#define IGB_RXBUFFER_2048	2048
#define IGB_RX_HDR_LEN		IGB_RXBUFFER_256	/* header buffer length */
#define IGB_RX_BUFSZ		IGB_RXBUFFER_2048	/* data (page) buffer length */

/* How many Tx Descriptors do we need to call netif_wake_queue ? */
#define IGB_TX_QUEUE_WAKE	16
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IGB_RX_BUFFER_WRITE	16	/* Must be power of 2 */

#define AUTO_ALL_MODES            0
#define IGB_EEPROM_APME         0x0400	/* presumably APM-enable bit in EEPROM -- verify */

#ifndef IGB_MASTER_SLAVE
/* Switch to override PHY master/slave setting */
#define IGB_MASTER_SLAVE	e1000_ms_hw_default
#endif

#define IGB_MNG_VLAN_NONE -1	/* sentinel: no manageability VLAN configured */

/* Tx flag bits carried in igb_tx_buffer.tx_flags */
#define IGB_TX_FLAGS_CSUM		0x00000001
#define IGB_TX_FLAGS_VLAN		0x00000002
#define IGB_TX_FLAGS_TSO		0x00000004
#define IGB_TX_FLAGS_IPV4		0x00000008
#define IGB_TX_FLAGS_TSTAMP		0x00000010
#define IGB_TX_FLAGS_VLAN_MASK		0xffff0000	/* VLAN tag in top 16 bits */
#define IGB_TX_FLAGS_VLAN_SHIFT	16
162
/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer */
struct igb_tx_buffer {
	union e1000_adv_tx_desc *next_to_watch;	/* descriptor whose writeback marks completion */
	unsigned long time_stamp;	/* presumably jiffies at queue time -- verify */
	struct sk_buff *skb;
	unsigned int bytecount;		/* byte count for this frame (stats) */
	u16 gso_segs;			/* segment count for GSO frames */
	__be16 protocol;		/* L3 protocol, network byte order */
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;			/* IGB_TX_FLAGS_* */
};
176
/* per-descriptor Rx bookkeeping: DMA-mapped page backing a receive buffer */
struct igb_rx_buffer {
	dma_addr_t dma;			/* DMA address of the mapped page */
	struct page *page;
	unsigned int page_offset;	/* offset of this buffer within the page */
};

/* Tx ring statistics; 64-bit counters guarded by the ring's u64_stats_sync */
struct igb_tx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 restart_queue;
	u64 restart_queue2;
};

/* Rx ring statistics; 64-bit counters guarded by the ring's u64_stats_sync */
struct igb_rx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 drops;
	u64 csum_err;
	u64 alloc_failed;	/* Rx buffer allocation failures */
};
197
/* One direction (Rx or Tx) of a q_vector: the ring list it services plus
 * the byte/packet totals used for dynamic ITR calculation. */
struct igb_ring_container {
	struct igb_ring *ring;		/* pointer to linked list of rings */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 work_limit;			/* total work allowed per interrupt */
	u8 count;			/* total number of rings in vector */
	u8 itr;				/* current ITR setting for ring */
};
206
struct igb_ring {
	struct igb_q_vector *q_vector;	/* backlink to q_vector */
	struct net_device *netdev;	/* back pointer to net_device */
	struct device *dev;		/* device pointer for dma mapping */
	union {				/* array of buffer info structs */
		/* a ring is either Tx or Rx, so the per-buffer
		 * bookkeeping arrays can share storage */
		struct igb_tx_buffer *tx_buffer_info;
		struct igb_rx_buffer *rx_buffer_info;
	};
	void *desc;			/* descriptor ring memory */
	unsigned long flags;		/* ring specific flags (enum e1000_ring_flags_t) */
	void __iomem *tail;		/* pointer to ring tail register */
	dma_addr_t dma;			/* phys address of the ring */
	unsigned int  size;		/* length of desc. ring in bytes */

	u16 count;			/* number of desc. in the ring */
	u8 queue_index;			/* logical index of the ring*/
	u8 reg_idx;			/* physical index of the ring */

	/* everything past this point are written often */
	u16 next_to_clean;		/* next descriptor to check for completion */
	u16 next_to_use;		/* next free descriptor slot */
	u16 next_to_alloc;

	union {
		/* TX */
		struct {
			struct igb_tx_queue_stats tx_stats;
			struct u64_stats_sync tx_syncp;
			struct u64_stats_sync tx_syncp2;
		};
		/* RX */
		struct {
			struct sk_buff *skb;	/* presumably in-progress Rx frame -- verify */
			struct igb_rx_queue_stats rx_stats;
			struct u64_stats_sync rx_syncp;
		};
	};
} ____cacheline_internodealigned_in_smp;
245
/* One interrupt vector: a NAPI context plus the Rx/Tx ring containers it
 * services. Rings are allocated inline at the tail of the structure. */
struct igb_q_vector {
	struct igb_adapter *adapter;	/* backlink */
	int cpu;			/* CPU for DCA */
	u32 eims_value;			/* EIMS mask value */

	u16 itr_val;			/* ITR value to program */
	u8 set_itr;			/* nonzero when itr_val needs writing -- TODO confirm */
	void __iomem *itr_register;	/* register that receives itr_val */

	struct igb_ring_container rx, tx;

	struct napi_struct napi;
	struct rcu_head rcu;	/* to avoid race with update stats on free */
	char name[IFNAMSIZ + 9];	/* IRQ name shown in /proc/interrupts */

	/* for dynamic allocation of rings associated with this q_vector */
	struct igb_ring ring[0] ____cacheline_internodealigned_in_smp;
};
264
/* Bit numbers for igb_ring.flags */
enum e1000_ring_flags_t {
	IGB_RING_FLAG_RX_SCTP_CSUM,
	IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
	IGB_RING_FLAG_TX_CTX_IDX,
	IGB_RING_FLAG_TX_DETECT_HANG
};

/* EOP | RS command bits for advanced Tx descriptors */
#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)

/* Typed accessors for the i-th descriptor in a ring's descriptor memory */
#define IGB_RX_DESC(R, i)	    \
	(&(((union e1000_adv_rx_desc *)((R)->desc))[i]))
#define IGB_TX_DESC(R, i)	    \
	(&(((union e1000_adv_tx_desc *)((R)->desc))[i]))
#define IGB_TX_CTXTDESC(R, i)	    \
	(&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i]))
280
281/* igb_test_staterr - tests bits within Rx descriptor status and error fields */
282static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc,
283				      const u32 stat_err_bits)
284{
285	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
286}
287
288/* igb_desc_unused - calculate if we have unused descriptors */
289static inline int igb_desc_unused(struct igb_ring *ring)
290{
291	if (ring->next_to_clean > ring->next_to_use)
292		return ring->next_to_clean - ring->next_to_use - 1;
293
294	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
295}
296
/* board specific private data structure */
struct igb_adapter {
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];	/* bitmap of VLANs in use */

	struct net_device *netdev;

	unsigned long state;		/* __IGB_* bits (enum e1000_state_t) */
	unsigned int flags;		/* IGB_FLAG_* bits */

	unsigned int num_q_vectors;
	struct msix_entry *msix_entries;	/* NULL when MSI-X is not in use */

	/* Interrupt Throttle Rate */
	u32 rx_itr_setting;
	u32 tx_itr_setting;
	u16 tx_itr;
	u16 rx_itr;

	/* TX */
	u16 tx_work_limit;
	u32 tx_timeout_count;
	int num_tx_queues;
	struct igb_ring *tx_ring[16];

	/* RX */
	int num_rx_queues;
	struct igb_ring *rx_ring[16];

	u32 max_frame_size;
	u32 min_frame_size;

	struct timer_list watchdog_timer;
	struct timer_list phy_info_timer;

	u16 mng_vlan_id;
	u32 bd_number;
	u32 wol;			/* current Wake-on-LAN settings */
	u32 en_mng_pt;
	u16 link_speed;
	u16 link_duplex;

	struct work_struct reset_task;
	struct work_struct watchdog_task;
	bool fc_autoneg;
	u8  tx_timeout_factor;
	struct timer_list blink_timer;	/* for ethtool LED identify blink */
	unsigned long led_status;

	/* OS defined structs */
	struct pci_dev *pdev;

	spinlock_t stats64_lock;	/* protects stats64 */
	struct rtnl_link_stats64 stats64;

	/* structs defined in e1000_hw.h */
	struct e1000_hw hw;
	struct e1000_hw_stats stats;
	struct e1000_phy_info phy_info;
	struct e1000_phy_stats phy_stats;

	/* ethtool self-test state */
	u32 test_icr;
	struct igb_ring test_tx_ring;
	struct igb_ring test_rx_ring;

	int msg_enable;

	struct igb_q_vector *q_vector[MAX_Q_VECTORS];
	u32 eims_enable_mask;
	u32 eims_other;

	/* to not mess up cache alignment, always add to the bottom */
	u32 eeprom_wol;			/* WoL capability read from EEPROM */

	u16 tx_ring_count;
	u16 rx_ring_count;
	unsigned int vfs_allocated_count;	/* number of SR-IOV VFs in use */
	struct vf_data_storage *vf_data;	/* array of vfs_allocated_count entries */
	int vf_rate_link_speed;
	u32 rss_queues;
	u32 wvbr;
	u32 *shadow_vfta;		/* software copy of the VLAN filter table */

#ifdef CONFIG_IGB_PTP
	/* PTP hardware clock / timestamping state */
	struct ptp_clock *ptp_clock;
	struct ptp_clock_info ptp_caps;
	struct delayed_work ptp_overflow_work;
	struct work_struct ptp_tx_work;
	struct sk_buff *ptp_tx_skb;	/* skb awaiting a Tx timestamp */
	spinlock_t tmreg_lock;		/* protects timecounter/cyclecounter access */
	struct cyclecounter cc;
	struct timecounter tc;
#endif /* CONFIG_IGB_PTP */

	char fw_version[32];
};
392
/* igb_adapter.flags bits */
#define IGB_FLAG_HAS_MSI           (1 << 0)
#define IGB_FLAG_DCA_ENABLED       (1 << 1)
#define IGB_FLAG_QUAD_PORT_A       (1 << 2)
#define IGB_FLAG_QUEUE_PAIRS       (1 << 3)
#define IGB_FLAG_DMAC              (1 << 4)
#define IGB_FLAG_PTP               (1 << 5)

/* DMA Coalescing defines */
#define IGB_MIN_TXPBSIZE           20408
#define IGB_TX_BUF_4096            4096
#define IGB_DMCTLX_DCFLUSH_DIS     0x80000000  /* Disable DMA Coal Flush */

#define IGB_82576_TSYNC_SHIFT 19
#define IGB_TS_HDR_LEN        16	/* bytes of timestamp prepended to packet (TSIP) */

/* bit numbers for igb_adapter.state */
enum e1000_state_t {
	__IGB_TESTING,
	__IGB_RESETTING,
	__IGB_DOWN
};

enum igb_boards {
	board_82575,
};
416
extern char igb_driver_name[];
extern char igb_driver_version[];

/* Driver entry points defined outside this header and shared between
 * the main driver and ethtool support code. */
extern int igb_up(struct igb_adapter *);
extern void igb_down(struct igb_adapter *);
extern void igb_reinit_locked(struct igb_adapter *);
extern void igb_reset(struct igb_adapter *);
extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
extern int igb_setup_tx_resources(struct igb_ring *);
extern int igb_setup_rx_resources(struct igb_ring *);
extern void igb_free_tx_resources(struct igb_ring *);
extern void igb_free_rx_resources(struct igb_ring *);
extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
extern void igb_setup_tctl(struct igb_adapter *);
extern void igb_setup_rctl(struct igb_adapter *);
extern netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
					   struct igb_tx_buffer *);
extern void igb_alloc_rx_buffers(struct igb_ring *, u16);
extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
extern bool igb_has_link(struct igb_adapter *adapter);
extern void igb_set_ethtool_ops(struct net_device *);
extern void igb_power_up_link(struct igb_adapter *);
extern void igb_set_fw_version(struct igb_adapter *);
#ifdef CONFIG_IGB_PTP
/* PTP / hardware timestamping entry points (compiled only with PTP support) */
extern void igb_ptp_init(struct igb_adapter *adapter);
extern void igb_ptp_stop(struct igb_adapter *adapter);
extern void igb_ptp_reset(struct igb_adapter *adapter);
extern void igb_ptp_tx_work(struct work_struct *work);
extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
				struct sk_buff *skb);
extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
				unsigned char *va,
				struct sk_buff *skb);
453static inline void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
454				       union e1000_adv_rx_desc *rx_desc,
455				       struct sk_buff *skb)
456{
457	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
458	    !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
459		igb_ptp_rx_rgtstamp(q_vector, skb);
460}
461
/* SIOCSHWTSTAMP ioctl handler */
extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
				  struct ifreq *ifr, int cmd);
#endif /* CONFIG_IGB_PTP */
465
466static inline s32 igb_reset_phy(struct e1000_hw *hw)
467{
468	if (hw->phy.ops.reset)
469		return hw->phy.ops.reset(hw);
470
471	return 0;
472}
473
474static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
475{
476	if (hw->phy.ops.read_reg)
477		return hw->phy.ops.read_reg(hw, offset, data);
478
479	return 0;
480}
481
482static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
483{
484	if (hw->phy.ops.write_reg)
485		return hw->phy.ops.write_reg(hw, offset, data);
486
487	return 0;
488}
489
490static inline s32 igb_get_phy_info(struct e1000_hw *hw)
491{
492	if (hw->phy.ops.get_phy_info)
493		return hw->phy.ops.get_phy_info(hw);
494
495	return 0;
496}
497
498static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
499{
500	return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
501}
502
503#endif /* _IGB_H_ */
504