1/*
2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses.  You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 *     Redistribution and use in source and binary forms, with or
11 *     without modification, are permitted provided that the following
12 *     conditions are met:
13 *
14 *      - Redistributions of source code must retain the above
15 *        copyright notice, this list of conditions and the following
16 *        disclaimer.
17 *
18 *      - Redistributions in binary form must reproduce the above
19 *        copyright notice, this list of conditions and the following
20 *        disclaimer in the documentation and/or other materials
21 *        provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "common.h"
33#include "regs.h"
34#include "sge_defs.h"
35#include "firmware_exports.h"
36
37static void t3_port_intr_clear(struct adapter *adapter, int idx);
38
39/**
40 *	t3_wait_op_done_val - wait until an operation is completed
41 *	@adapter: the adapter performing the operation
42 *	@reg: the register to check for completion
43 *	@mask: a single-bit field within @reg that indicates completion
44 *	@polarity: the value of the field when the operation is completed
45 *	@attempts: number of check iterations
46 *	@delay: delay in usecs between iterations
47 *	@valp: where to store the value of the register at completion time
48 *
49 *	Wait until an operation is completed by checking a bit in a register
50 *	up to @attempts times.  If @valp is not NULL the value of the register
51 *	at the time it indicated completion is stored there.  Returns 0 if the
52 *	operation completes and -EAGAIN otherwise.
53 */
54
55int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
56			int polarity, int attempts, int delay, u32 *valp)
57{
58	while (1) {
59		u32 val = t3_read_reg(adapter, reg);
60
61		if (!!(val & mask) == polarity) {
62			if (valp)
63				*valp = val;
64			return 0;
65		}
66		if (--attempts == 0)
67			return -EAGAIN;
68		if (delay)
69			udelay(delay);
70	}
71}
72
73/**
74 *	t3_write_regs - write a bunch of registers
75 *	@adapter: the adapter to program
76 *	@p: an array of register address/register value pairs
77 *	@n: the number of address/value pairs
78 *	@offset: register address offset
79 *
80 *	Takes an array of register address/register value pairs and writes each
81 *	value to the corresponding register.  Register addresses are adjusted
82 *	by the supplied offset.
83 */
84void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
85		   int n, unsigned int offset)
86{
87	while (n--) {
88		t3_write_reg(adapter, p->reg_addr + offset, p->val);
89		p++;
90	}
91}
92
93/**
94 *	t3_set_reg_field - set a register field to a value
95 *	@adapter: the adapter to program
96 *	@addr: the register address
97 *	@mask: specifies the portion of the register to modify
98 *	@val: the new value for the register field
99 *
100 *	Sets a register field specified by the supplied mask to the
101 *	given value.
102 */
103void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
104		      u32 val)
105{
106	u32 v = t3_read_reg(adapter, addr) & ~mask;
107
108	t3_write_reg(adapter, addr, v | val);
109	t3_read_reg(adapter, addr);	/* flush */
110}
111
112/**
113 *	t3_read_indirect - read indirectly addressed registers
114 *	@adap: the adapter
115 *	@addr_reg: register holding the indirect address
116 *	@data_reg: register holding the value of the indirect register
117 *	@vals: where the read register values are stored
118 *	@start_idx: index of first indirect register to read
119 *	@nregs: how many indirect registers to read
120 *
121 *	Reads registers that are accessed indirectly through an address/data
122 *	register pair.
123 */
124static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
125			     unsigned int data_reg, u32 *vals,
126			     unsigned int nregs, unsigned int start_idx)
127{
128	while (nregs--) {
129		t3_write_reg(adap, addr_reg, start_idx);
130		*vals++ = t3_read_reg(adap, data_reg);
131		start_idx++;
132	}
133}
134
135/**
136 *	t3_mc7_bd_read - read from MC7 through backdoor accesses
137 *	@mc7: identifies MC7 to read from
138 *	@start: index of first 64-bit word to read
139 *	@n: number of 64-bit words to read
140 *	@buf: where to store the read result
141 *
142 *	Read n 64-bit words from MC7 starting at word start, using backdoor
143 *	accesses.
144 */
145int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
146		   u64 *buf)
147{
148	static const int shift[] = { 0, 0, 16, 24 };
149	static const int step[] = { 0, 32, 16, 8 };
150
151	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
152	struct adapter *adap = mc7->adapter;
153
154	if (start >= size64 || start + n > size64)
155		return -EINVAL;
156
157	start *= (8 << mc7->width);
158	while (n--) {
159		int i;
160		u64 val64 = 0;
161
162		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
163			int attempts = 10;
164			u32 val;
165
166			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
167			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
168			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
169			while ((val & F_BUSY) && attempts--)
170				val = t3_read_reg(adap,
171						  mc7->offset + A_MC7_BD_OP);
172			if (val & F_BUSY)
173				return -EIO;
174
175			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
176			if (mc7->width == 0) {
177				val64 = t3_read_reg(adap,
178						    mc7->offset +
179						    A_MC7_BD_DATA0);
180				val64 |= (u64) val << 32;
181			} else {
182				if (mc7->width > 1)
183					val >>= shift[mc7->width];
184				val64 |= (u64) val << (step[mc7->width] * i);
185			}
186			start += 8;
187		}
188		*buf++ = val64;
189	}
190	return 0;
191}
192
193/*
194 * Initialize MI1.
195 */
196static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
197{
198	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
199	u32 val = F_PREEN | V_CLKDIV(clkdiv);
200
201	t3_write_reg(adap, A_MI1_CFG, val);
202}
203
204#define MDIO_ATTEMPTS 20
205
206/*
207 * MI1 read/write operations for clause 22 PHYs.
208 */
209static int t3_mi1_read(struct net_device *dev, int phy_addr, int mmd_addr,
210		       u16 reg_addr)
211{
212	struct port_info *pi = netdev_priv(dev);
213	struct adapter *adapter = pi->adapter;
214	int ret;
215	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
216
217	mutex_lock(&adapter->mdio_lock);
218	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
219	t3_write_reg(adapter, A_MI1_ADDR, addr);
220	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
221	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
222	if (!ret)
223		ret = t3_read_reg(adapter, A_MI1_DATA);
224	mutex_unlock(&adapter->mdio_lock);
225	return ret;
226}
227
228static int t3_mi1_write(struct net_device *dev, int phy_addr, int mmd_addr,
229			u16 reg_addr, u16 val)
230{
231	struct port_info *pi = netdev_priv(dev);
232	struct adapter *adapter = pi->adapter;
233	int ret;
234	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
235
236	mutex_lock(&adapter->mdio_lock);
237	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
238	t3_write_reg(adapter, A_MI1_ADDR, addr);
239	t3_write_reg(adapter, A_MI1_DATA, val);
240	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
241	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
242	mutex_unlock(&adapter->mdio_lock);
243	return ret;
244}
245
246static const struct mdio_ops mi1_mdio_ops = {
247	.read = t3_mi1_read,
248	.write = t3_mi1_write,
249	.mode_support = MDIO_SUPPORTS_C22
250};
251
252/*
253 * Performs the address cycle for clause 45 PHYs.
254 * Must be called with the MDIO_LOCK held.
255 */
256static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
257		       int reg_addr)
258{
259	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
260
261	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
262	t3_write_reg(adapter, A_MI1_ADDR, addr);
263	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
264	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
265	return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
266			       MDIO_ATTEMPTS, 10);
267}
268
269/*
270 * MI1 read/write operations for indirect-addressed PHYs.
271 */
272static int mi1_ext_read(struct net_device *dev, int phy_addr, int mmd_addr,
273			u16 reg_addr)
274{
275	struct port_info *pi = netdev_priv(dev);
276	struct adapter *adapter = pi->adapter;
277	int ret;
278
279	mutex_lock(&adapter->mdio_lock);
280	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
281	if (!ret) {
282		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
283		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
284				      MDIO_ATTEMPTS, 10);
285		if (!ret)
286			ret = t3_read_reg(adapter, A_MI1_DATA);
287	}
288	mutex_unlock(&adapter->mdio_lock);
289	return ret;
290}
291
292static int mi1_ext_write(struct net_device *dev, int phy_addr, int mmd_addr,
293			 u16 reg_addr, u16 val)
294{
295	struct port_info *pi = netdev_priv(dev);
296	struct adapter *adapter = pi->adapter;
297	int ret;
298
299	mutex_lock(&adapter->mdio_lock);
300	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
301	if (!ret) {
302		t3_write_reg(adapter, A_MI1_DATA, val);
303		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
304		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
305				      MDIO_ATTEMPTS, 10);
306	}
307	mutex_unlock(&adapter->mdio_lock);
308	return ret;
309}
310
311static const struct mdio_ops mi1_mdio_ext_ops = {
312	.read = mi1_ext_read,
313	.write = mi1_ext_write,
314	.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22
315};
316
317/**
318 *	t3_mdio_change_bits - modify the value of a PHY register
319 *	@phy: the PHY to operate on
320 *	@mmd: the device address
321 *	@reg: the register address
322 *	@clear: what part of the register value to mask off
323 *	@set: what part of the register value to set
324 *
325 *	Changes the value of a PHY register by applying a mask to its current
326 *	value and ORing the result with a new value.
327 */
328int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
329			unsigned int set)
330{
331	int ret;
332	unsigned int val;
333
334	ret = t3_mdio_read(phy, mmd, reg, &val);
335	if (!ret) {
336		val &= ~clear;
337		ret = t3_mdio_write(phy, mmd, reg, val | set);
338	}
339	return ret;
340}
341
342/**
343 *	t3_phy_reset - reset a PHY block
344 *	@phy: the PHY to operate on
345 *	@mmd: the device address of the PHY block to reset
346 *	@wait: how long to wait for the reset to complete in 1ms increments
347 *
348 *	Resets a PHY block and optionally waits for the reset to complete.
349 *	@mmd should be 0 for 10/100/1000 PHYs and the device address to reset
350 *	for 10G PHYs.
351 */
352int t3_phy_reset(struct cphy *phy, int mmd, int wait)
353{
354	int err;
355	unsigned int ctl;
356
357	err = t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER,
358				  MDIO_CTRL1_RESET);
359	if (err || !wait)
360		return err;
361
362	do {
363		err = t3_mdio_read(phy, mmd, MDIO_CTRL1, &ctl);
364		if (err)
365			return err;
366		ctl &= MDIO_CTRL1_RESET;
367		if (ctl)
368			msleep(1);
369	} while (ctl && --wait);
370
371	return ctl ? -1 : 0;
372}
373
374/**
375 *	t3_phy_advertise - set the PHY advertisement registers for autoneg
376 *	@phy: the PHY to operate on
377 *	@advert: bitmap of capabilities the PHY should advertise
378 *
379 *	Sets a 10/100/1000 PHY's advertisement registers to advertise the
380 *	requested capabilities.
381 */
382int t3_phy_advertise(struct cphy *phy, unsigned int advert)
383{
384	int err;
385	unsigned int val = 0;
386
387	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_CTRL1000, &val);
388	if (err)
389		return err;
390
391	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
392	if (advert & ADVERTISED_1000baseT_Half)
393		val |= ADVERTISE_1000HALF;
394	if (advert & ADVERTISED_1000baseT_Full)
395		val |= ADVERTISE_1000FULL;
396
397	err = t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_CTRL1000, val);
398	if (err)
399		return err;
400
401	val = 1;
402	if (advert & ADVERTISED_10baseT_Half)
403		val |= ADVERTISE_10HALF;
404	if (advert & ADVERTISED_10baseT_Full)
405		val |= ADVERTISE_10FULL;
406	if (advert & ADVERTISED_100baseT_Half)
407		val |= ADVERTISE_100HALF;
408	if (advert & ADVERTISED_100baseT_Full)
409		val |= ADVERTISE_100FULL;
410	if (advert & ADVERTISED_Pause)
411		val |= ADVERTISE_PAUSE_CAP;
412	if (advert & ADVERTISED_Asym_Pause)
413		val |= ADVERTISE_PAUSE_ASYM;
414	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
415}
416
417/**
418 *	t3_phy_advertise_fiber - set fiber PHY advertisement register
419 *	@phy: the PHY to operate on
420 *	@advert: bitmap of capabilities the PHY should advertise
421 *
422 *	Sets a fiber PHY's advertisement register to advertise the
423 *	requested capabilities.
424 */
425int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
426{
427	unsigned int val = 0;
428
429	if (advert & ADVERTISED_1000baseT_Half)
430		val |= ADVERTISE_1000XHALF;
431	if (advert & ADVERTISED_1000baseT_Full)
432		val |= ADVERTISE_1000XFULL;
433	if (advert & ADVERTISED_Pause)
434		val |= ADVERTISE_1000XPAUSE;
435	if (advert & ADVERTISED_Asym_Pause)
436		val |= ADVERTISE_1000XPSE_ASYM;
437	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
438}
439
440/**
441 *	t3_set_phy_speed_duplex - force PHY speed and duplex
442 *	@phy: the PHY to operate on
443 *	@speed: requested PHY speed
444 *	@duplex: requested PHY duplex
445 *
446 *	Force a 10/100/1000 PHY's speed and duplex.  This also disables
447 *	auto-negotiation except for GigE, where auto-negotiation is mandatory.
448 */
449int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
450{
451	int err;
452	unsigned int ctl;
453
454	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_BMCR, &ctl);
455	if (err)
456		return err;
457
458	if (speed >= 0) {
459		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
460		if (speed == SPEED_100)
461			ctl |= BMCR_SPEED100;
462		else if (speed == SPEED_1000)
463			ctl |= BMCR_SPEED1000;
464	}
465	if (duplex >= 0) {
466		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
467		if (duplex == DUPLEX_FULL)
468			ctl |= BMCR_FULLDPLX;
469	}
470	if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
471		ctl |= BMCR_ANENABLE;
472	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_BMCR, ctl);
473}
474
475int t3_phy_lasi_intr_enable(struct cphy *phy)
476{
477	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
478			     MDIO_PMA_LASI_LSALARM);
479}
480
481int t3_phy_lasi_intr_disable(struct cphy *phy)
482{
483	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
484}
485
486int t3_phy_lasi_intr_clear(struct cphy *phy)
487{
488	u32 val;
489
490	return t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
491}
492
493int t3_phy_lasi_intr_handler(struct cphy *phy)
494{
495	unsigned int status;
496	int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT,
497			       &status);
498
499	if (err)
500		return err;
501	return (status & MDIO_PMA_LASI_LSALARM) ? cphy_cause_link_change : 0;
502}
503
504static const struct adapter_info t3_adap_info[] = {
505	{1, 1, 0,
506	 F_GPIO2_OEN | F_GPIO4_OEN |
507	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
508	 &mi1_mdio_ops, "Chelsio PE9000"},
509	{1, 1, 0,
510	 F_GPIO2_OEN | F_GPIO4_OEN |
511	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
512	 &mi1_mdio_ops, "Chelsio T302"},
513	{1, 0, 0,
514	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
515	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
516	 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
517	 &mi1_mdio_ext_ops, "Chelsio T310"},
518	{1, 1, 0,
519	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
520	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
521	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
522	 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
523	 &mi1_mdio_ext_ops, "Chelsio T320"},
524	{},
525	{},
526	{1, 0, 0,
527	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
528	 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
529	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
530	 &mi1_mdio_ext_ops, "Chelsio T310" },
531	{1, 0, 0,
532	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
533	 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
534	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
535	 &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
536};
537
538/*
539 * Return the adapter_info structure with a given index.  Out-of-range indices
540 * return NULL.
541 */
542const struct adapter_info *t3_get_adapter_info(unsigned int id)
543{
544	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
545}
546
547struct port_type_info {
548	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
549			int phy_addr, const struct mdio_ops *ops);
550};
551
552static const struct port_type_info port_types[] = {
553	{ NULL },
554	{ t3_ael1002_phy_prep },
555	{ t3_vsc8211_phy_prep },
556	{ NULL},
557	{ t3_xaui_direct_phy_prep },
558	{ t3_ael2005_phy_prep },
559	{ t3_qt2045_phy_prep },
560	{ t3_ael1006_phy_prep },
561	{ NULL },
562	{ t3_aq100x_phy_prep },
563	{ t3_ael2020_phy_prep },
564};
565
566#define VPD_ENTRY(name, len) \
567	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
568
569/*
570 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
571 * VPD-R sections.
572 */
573struct t3_vpd {
574	u8 id_tag;
575	u8 id_len[2];
576	u8 id_data[16];
577	u8 vpdr_tag;
578	u8 vpdr_len[2];
579	VPD_ENTRY(pn, 16);	/* part number */
580	VPD_ENTRY(ec, 16);	/* EC level */
581	VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
582	VPD_ENTRY(na, 12);	/* MAC address base */
583	VPD_ENTRY(cclk, 6);	/* core clock */
584	VPD_ENTRY(mclk, 6);	/* mem clock */
585	VPD_ENTRY(uclk, 6);	/* uP clk */
586	VPD_ENTRY(mdc, 6);	/* MDIO clk */
587	VPD_ENTRY(mt, 2);	/* mem timing */
588	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
589	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
590	VPD_ENTRY(port0, 2);	/* PHY0 complex */
591	VPD_ENTRY(port1, 2);	/* PHY1 complex */
592	VPD_ENTRY(port2, 2);	/* PHY2 complex */
593	VPD_ENTRY(port3, 2);	/* PHY3 complex */
594	VPD_ENTRY(rv, 1);	/* csum */
595	u32 pad;		/* for multiple-of-4 sizing and alignment */
596};
597
598#define EEPROM_MAX_POLL   40
599#define EEPROM_STAT_ADDR  0x4000
600#define VPD_BASE          0xc00
601
602/**
603 *	t3_seeprom_read - read a VPD EEPROM location
604 *	@adapter: adapter to read
605 *	@addr: EEPROM address
606 *	@data: where to store the read data
607 *
608 *	Read a 32-bit word from a location in VPD EEPROM using the card's PCI
609 *	VPD ROM capability.  A zero is written to the flag bit when the
610 *	address is written to the control register.  The hardware device will
611 *	set the flag to 1 when 4 bytes have been read into the data register.
612 */
613int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
614{
615	u16 val;
616	int attempts = EEPROM_MAX_POLL;
617	u32 v;
618	unsigned int base = adapter->params.pci.vpd_cap_addr;
619
620	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
621		return -EINVAL;
622
623	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
624	do {
625		udelay(10);
626		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
627	} while (!(val & PCI_VPD_ADDR_F) && --attempts);
628
629	if (!(val & PCI_VPD_ADDR_F)) {
630		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
631		return -EIO;
632	}
633	pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
634	*data = cpu_to_le32(v);
635	return 0;
636}
637
638/**
639 *	t3_seeprom_write - write a VPD EEPROM location
640 *	@adapter: adapter to write
641 *	@addr: EEPROM address
642 *	@data: value to write
643 *
644 *	Write a 32-bit word to a location in VPD EEPROM using the card's PCI
645 *	VPD ROM capability.
646 */
647int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
648{
649	u16 val;
650	int attempts = EEPROM_MAX_POLL;
651	unsigned int base = adapter->params.pci.vpd_cap_addr;
652
653	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
654		return -EINVAL;
655
656	pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
657			       le32_to_cpu(data));
658	pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR,
659			      addr | PCI_VPD_ADDR_F);
660	do {
661		msleep(1);
662		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
663	} while ((val & PCI_VPD_ADDR_F) && --attempts);
664
665	if (val & PCI_VPD_ADDR_F) {
666		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
667		return -EIO;
668	}
669	return 0;
670}
671
672/**
673 *	t3_seeprom_wp - enable/disable EEPROM write protection
674 *	@adapter: the adapter
675 *	@enable: 1 to enable write protection, 0 to disable it
676 *
677 *	Enables or disables write protection on the serial EEPROM.
678 */
679int t3_seeprom_wp(struct adapter *adapter, int enable)
680{
681	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
682}
683
684/**
685 *	get_vpd_params - read VPD parameters from VPD EEPROM
686 *	@adapter: adapter to read
687 *	@p: where to store the parameters
688 *
689 *	Reads card parameters stored in VPD EEPROM.
690 */
691static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
692{
693	int i, addr, ret;
694	struct t3_vpd vpd;
695
696	/*
697	 * Card information is normally at VPD_BASE but some early cards had
698	 * it at 0.
699	 */
700	ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
701	if (ret)
702		return ret;
703	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
704
705	for (i = 0; i < sizeof(vpd); i += 4) {
706		ret = t3_seeprom_read(adapter, addr + i,
707				      (__le32 *)((u8 *)&vpd + i));
708		if (ret)
709			return ret;
710	}
711
712	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
713	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
714	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
715	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
716	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
717	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
718
719	/* Old eeproms didn't have port information */
720	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
721		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
722		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
723	} else {
724		p->port_type[0] = hex_to_bin(vpd.port0_data[0]);
725		p->port_type[1] = hex_to_bin(vpd.port1_data[0]);
726		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
727		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
728	}
729
730	for (i = 0; i < 6; i++)
731		p->eth_base[i] = hex_to_bin(vpd.na_data[2 * i]) * 16 +
732				 hex_to_bin(vpd.na_data[2 * i + 1]);
733	return 0;
734}
735
736/* serial flash and firmware constants */
737enum {
738	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
739	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
740	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */
741
742	/* flash command opcodes */
743	SF_PROG_PAGE = 2,	/* program page */
744	SF_WR_DISABLE = 4,	/* disable writes */
745	SF_RD_STATUS = 5,	/* read status register */
746	SF_WR_ENABLE = 6,	/* enable writes */
747	SF_RD_DATA_FAST = 0xb,	/* read flash */
748	SF_ERASE_SECTOR = 0xd8,	/* erase sector */
749
750	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
751	FW_VERS_ADDR = 0x7fffc,    /* flash address holding FW version */
752	FW_MIN_SIZE = 8            /* at least version and csum */
753};
754
755/**
756 *	sf1_read - read data from the serial flash
757 *	@adapter: the adapter
758 *	@byte_cnt: number of bytes to read
759 *	@cont: whether another operation will be chained
760 *	@valp: where to store the read data
761 *
762 *	Reads up to 4 bytes of data from the serial flash.  The location of
763 *	the read needs to be specified prior to calling this by issuing the
764 *	appropriate commands to the serial flash.
765 */
766static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
767		    u32 *valp)
768{
769	int ret;
770
771	if (!byte_cnt || byte_cnt > 4)
772		return -EINVAL;
773	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
774		return -EBUSY;
775	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
776	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
777	if (!ret)
778		*valp = t3_read_reg(adapter, A_SF_DATA);
779	return ret;
780}
781
782/**
783 *	sf1_write - write data to the serial flash
784 *	@adapter: the adapter
785 *	@byte_cnt: number of bytes to write
786 *	@cont: whether another operation will be chained
787 *	@val: value to write
788 *
789 *	Writes up to 4 bytes of data to the serial flash.  The location of
790 *	the write needs to be specified prior to calling this by issuing the
791 *	appropriate commands to the serial flash.
792 */
793static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
794		     u32 val)
795{
796	if (!byte_cnt || byte_cnt > 4)
797		return -EINVAL;
798	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
799		return -EBUSY;
800	t3_write_reg(adapter, A_SF_DATA, val);
801	t3_write_reg(adapter, A_SF_OP,
802		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
803	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
804}
805
806/**
807 *	flash_wait_op - wait for a flash operation to complete
808 *	@adapter: the adapter
809 *	@attempts: max number of polls of the status register
810 *	@delay: delay between polls in ms
811 *
812 *	Wait for a flash operation to complete by polling the status register.
813 */
814static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
815{
816	int ret;
817	u32 status;
818
819	while (1) {
820		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
821		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
822			return ret;
823		if (!(status & 1))
824			return 0;
825		if (--attempts == 0)
826			return -EAGAIN;
827		if (delay)
828			msleep(delay);
829	}
830}
831
832/**
833 *	t3_read_flash - read words from serial flash
834 *	@adapter: the adapter
835 *	@addr: the start address for the read
836 *	@nwords: how many 32-bit words to read
837 *	@data: where to store the read data
838 *	@byte_oriented: whether to store data as bytes or as words
839 *
840 *	Read the specified number of 32-bit words from the serial flash.
841 *	If @byte_oriented is set the read data is stored as a byte array
842 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
843 *	natural endianess.
844 */
845static int t3_read_flash(struct adapter *adapter, unsigned int addr,
846			 unsigned int nwords, u32 *data, int byte_oriented)
847{
848	int ret;
849
850	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
851		return -EINVAL;
852
853	addr = swab32(addr) | SF_RD_DATA_FAST;
854
855	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
856	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
857		return ret;
858
859	for (; nwords; nwords--, data++) {
860		ret = sf1_read(adapter, 4, nwords > 1, data);
861		if (ret)
862			return ret;
863		if (byte_oriented)
864			*data = htonl(*data);
865	}
866	return 0;
867}
868
869/**
870 *	t3_write_flash - write up to a page of data to the serial flash
871 *	@adapter: the adapter
872 *	@addr: the start address to write
873 *	@n: length of data to write
874 *	@data: the data to write
875 *
876 *	Writes up to a page of data (256 bytes) to the serial flash starting
877 *	at the given address.
878 */
879static int t3_write_flash(struct adapter *adapter, unsigned int addr,
880			  unsigned int n, const u8 *data)
881{
882	int ret;
883	u32 buf[64];
884	unsigned int i, c, left, val, offset = addr & 0xff;
885
886	if (addr + n > SF_SIZE || offset + n > 256)
887		return -EINVAL;
888
889	val = swab32(addr) | SF_PROG_PAGE;
890
891	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
892	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
893		return ret;
894
895	for (left = n; left; left -= c) {
896		c = min(left, 4U);
897		for (val = 0, i = 0; i < c; ++i)
898			val = (val << 8) + *data++;
899
900		ret = sf1_write(adapter, c, c != left, val);
901		if (ret)
902			return ret;
903	}
904	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
905		return ret;
906
907	/* Read the page to verify the write succeeded */
908	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
909	if (ret)
910		return ret;
911
912	if (memcmp(data - n, (u8 *) buf + offset, n))
913		return -EIO;
914	return 0;
915}
916
917/**
918 *	t3_get_tp_version - read the tp sram version
919 *	@adapter: the adapter
920 *	@vers: where to place the version
921 *
922 *	Reads the protocol sram version from sram.
923 */
924int t3_get_tp_version(struct adapter *adapter, u32 *vers)
925{
926	int ret;
927
928	/* Get version loaded in SRAM */
929	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
930	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
931			      1, 1, 5, 1);
932	if (ret)
933		return ret;
934
935	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
936
937	return 0;
938}
939
940/**
941 *	t3_check_tpsram_version - read the tp sram version
942 *	@adapter: the adapter
943 *
944 *	Reads the protocol sram version from flash.
945 */
946int t3_check_tpsram_version(struct adapter *adapter)
947{
948	int ret;
949	u32 vers;
950	unsigned int major, minor;
951
952	if (adapter->params.rev == T3_REV_A)
953		return 0;
954
955
956	ret = t3_get_tp_version(adapter, &vers);
957	if (ret)
958		return ret;
959
960	major = G_TP_VERSION_MAJOR(vers);
961	minor = G_TP_VERSION_MINOR(vers);
962
963	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
964		return 0;
965	else {
966		CH_ERR(adapter, "found wrong TP version (%u.%u), "
967		       "driver compiled for version %d.%d\n", major, minor,
968		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
969	}
970	return -EINVAL;
971}
972
973/**
974 *	t3_check_tpsram - check if provided protocol SRAM
975 *			  is compatible with this driver
976 *	@adapter: the adapter
977 *	@tp_sram: the firmware image to write
978 *	@size: image size
979 *
980 *	Checks if an adapter's tp sram is compatible with the driver.
981 *	Returns 0 if the versions are compatible, a negative error otherwise.
982 */
983int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
984		    unsigned int size)
985{
986	u32 csum;
987	unsigned int i;
988	const __be32 *p = (const __be32 *)tp_sram;
989
990	/* Verify checksum */
991	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
992		csum += ntohl(p[i]);
993	if (csum != 0xffffffff) {
994		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
995		       csum);
996		return -EINVAL;
997	}
998
999	return 0;
1000}
1001
1002enum fw_version_type {
1003	FW_VERSION_N3,
1004	FW_VERSION_T3
1005};
1006
1007/**
1008 *	t3_get_fw_version - read the firmware version
1009 *	@adapter: the adapter
1010 *	@vers: where to place the version
1011 *
1012 *	Reads the FW version from flash.
1013 */
1014int t3_get_fw_version(struct adapter *adapter, u32 *vers)
1015{
1016	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
1017}
1018
1019/**
1020 *	t3_check_fw_version - check if the FW is compatible with this driver
1021 *	@adapter: the adapter
1022 *
1023 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
1024 *	if the versions are compatible, a negative error otherwise.
1025 */
1026int t3_check_fw_version(struct adapter *adapter)
1027{
1028	int ret;
1029	u32 vers;
1030	unsigned int type, major, minor;
1031
1032	ret = t3_get_fw_version(adapter, &vers);
1033	if (ret)
1034		return ret;
1035
1036	type = G_FW_VERSION_TYPE(vers);
1037	major = G_FW_VERSION_MAJOR(vers);
1038	minor = G_FW_VERSION_MINOR(vers);
1039
1040	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
1041	    minor == FW_VERSION_MINOR)
1042		return 0;
1043	else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
1044		CH_WARN(adapter, "found old FW minor version(%u.%u), "
1045		        "driver compiled for version %u.%u\n", major, minor,
1046			FW_VERSION_MAJOR, FW_VERSION_MINOR);
1047	else {
1048		CH_WARN(adapter, "found newer FW version(%u.%u), "
1049		        "driver compiled for version %u.%u\n", major, minor,
1050			FW_VERSION_MAJOR, FW_VERSION_MINOR);
1051			return 0;
1052	}
1053	return -EINVAL;
1054}
1055
1056/**
1057 *	t3_flash_erase_sectors - erase a range of flash sectors
1058 *	@adapter: the adapter
1059 *	@start: the first sector to erase
1060 *	@end: the last sector to erase
1061 *
1062 *	Erases the sectors in the given range.
1063 */
1064static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
1065{
1066	while (start <= end) {
1067		int ret;
1068
1069		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1070		    (ret = sf1_write(adapter, 4, 0,
1071				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
1072		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
1073			return ret;
1074		start++;
1075	}
1076	return 0;
1077}
1078
1079/*
1080 *	t3_load_fw - download firmware
1081 *	@adapter: the adapter
1082 *	@fw_data: the firmware image to write
1083 *	@size: image size
1084 *
1085 *	Write the supplied firmware image to the card's serial flash.
1086 *	The FW image has the following sections: @size - 8 bytes of code and
1087 *	data, followed by 4 bytes of FW version, followed by the 32-bit
1088 *	1's complement checksum of the whole image.
1089 */
1090int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
1091{
1092	u32 csum;
1093	unsigned int i;
1094	const __be32 *p = (const __be32 *)fw_data;
1095	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
1096
1097	if ((size & 3) || size < FW_MIN_SIZE)
1098		return -EINVAL;
1099	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
1100		return -EFBIG;
1101
1102	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1103		csum += ntohl(p[i]);
1104	if (csum != 0xffffffff) {
1105		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1106		       csum);
1107		return -EINVAL;
1108	}
1109
1110	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
1111	if (ret)
1112		goto out;
1113
1114	size -= 8;		/* trim off version and checksum */
1115	for (addr = FW_FLASH_BOOT_ADDR; size;) {
1116		unsigned int chunk_size = min(size, 256U);
1117
1118		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
1119		if (ret)
1120			goto out;
1121
1122		addr += chunk_size;
1123		fw_data += chunk_size;
1124		size -= chunk_size;
1125	}
1126
1127	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
1128out:
1129	if (ret)
1130		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
1131	return ret;
1132}
1133
1134#define CIM_CTL_BASE 0x2000
1135
1136/**
1137 *      t3_cim_ctl_blk_read - read a block from CIM control region
1138 *
1139 *      @adap: the adapter
1140 *      @addr: the start address within the CIM control region
1141 *      @n: number of words to read
1142 *      @valp: where to store the result
1143 *
1144 *      Reads a block of 4-byte words from the CIM control region.
1145 */
1146int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
1147			unsigned int n, unsigned int *valp)
1148{
1149	int ret = 0;
1150
1151	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1152		return -EBUSY;
1153
1154	for ( ; !ret && n--; addr += 4) {
1155		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1156		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1157				      0, 5, 2);
1158		if (!ret)
1159			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
1160	}
1161	return ret;
1162}
1163
1164static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
1165			       u32 *rx_hash_high, u32 *rx_hash_low)
1166{
1167	/* stop Rx unicast traffic */
1168	t3_mac_disable_exact_filters(mac);
1169
1170	/* stop broadcast, multicast, promiscuous mode traffic */
1171	*rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
1172	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
1173			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
1174			 F_DISBCAST);
1175
1176	*rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
1177	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);
1178
1179	*rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
1180	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);
1181
1182	/* Leave time to drain max RX fifo */
1183	msleep(1);
1184}
1185
1186static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
1187			       u32 rx_hash_high, u32 rx_hash_low)
1188{
1189	t3_mac_enable_exact_filters(mac);
1190	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
1191			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
1192			 rx_cfg);
1193	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
1194	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
1195}
1196
1197/**
1198 *	t3_link_changed - handle interface link changes
1199 *	@adapter: the adapter
1200 *	@port_id: the port index that changed link state
1201 *
1202 *	Called when a port's link settings change to propagate the new values
1203 *	to the associated PHY and MAC.  After performing the common tasks it
1204 *	invokes an OS-specific handler.
1205 */
1206void t3_link_changed(struct adapter *adapter, int port_id)
1207{
1208	int link_ok, speed, duplex, fc;
1209	struct port_info *pi = adap2pinfo(adapter, port_id);
1210	struct cphy *phy = &pi->phy;
1211	struct cmac *mac = &pi->mac;
1212	struct link_config *lc = &pi->link_config;
1213
1214	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1215
1216	if (!lc->link_ok && link_ok) {
1217		u32 rx_cfg, rx_hash_high, rx_hash_low;
1218		u32 status;
1219
1220		t3_xgm_intr_enable(adapter, port_id);
1221		t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
1222		t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
1223		t3_mac_enable(mac, MAC_DIRECTION_RX);
1224
1225		status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
1226		if (status & F_LINKFAULTCHANGE) {
1227			mac->stats.link_faults++;
1228			pi->link_fault = 1;
1229		}
1230		t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
1231	}
1232
1233	if (lc->requested_fc & PAUSE_AUTONEG)
1234		fc &= lc->requested_fc;
1235	else
1236		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1237
1238	if (link_ok == lc->link_ok && speed == lc->speed &&
1239	    duplex == lc->duplex && fc == lc->fc)
1240		return;                            /* nothing changed */
1241
1242	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1243	    uses_xaui(adapter)) {
1244		if (link_ok)
1245			t3b_pcs_reset(mac);
1246		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1247			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
1248	}
1249	lc->link_ok = link_ok;
1250	lc->speed = speed < 0 ? SPEED_INVALID : speed;
1251	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1252
1253	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1254		/* Set MAC speed, duplex, and flow control to match PHY. */
1255		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1256		lc->fc = fc;
1257	}
1258
1259	t3_os_link_changed(adapter, port_id, link_ok && !pi->link_fault,
1260			   speed, duplex, fc);
1261}
1262
1263void t3_link_fault(struct adapter *adapter, int port_id)
1264{
1265	struct port_info *pi = adap2pinfo(adapter, port_id);
1266	struct cmac *mac = &pi->mac;
1267	struct cphy *phy = &pi->phy;
1268	struct link_config *lc = &pi->link_config;
1269	int link_ok, speed, duplex, fc, link_fault;
1270	u32 rx_cfg, rx_hash_high, rx_hash_low;
1271
1272	t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
1273
1274	if (adapter->params.rev > 0 && uses_xaui(adapter))
1275		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0);
1276
1277	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
1278	t3_mac_enable(mac, MAC_DIRECTION_RX);
1279
1280	t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
1281
1282	link_fault = t3_read_reg(adapter,
1283				 A_XGM_INT_STATUS + mac->offset);
1284	link_fault &= F_LINKFAULTCHANGE;
1285
1286	link_ok = lc->link_ok;
1287	speed = lc->speed;
1288	duplex = lc->duplex;
1289	fc = lc->fc;
1290
1291	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1292
1293	if (link_fault) {
1294		lc->link_ok = 0;
1295		lc->speed = SPEED_INVALID;
1296		lc->duplex = DUPLEX_INVALID;
1297
1298		t3_os_link_fault(adapter, port_id, 0);
1299
1300		/* Account link faults only when the phy reports a link up */
1301		if (link_ok)
1302			mac->stats.link_faults++;
1303	} else {
1304		if (link_ok)
1305			t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1306				     F_TXACTENABLE | F_RXEN);
1307
1308		pi->link_fault = 0;
1309		lc->link_ok = (unsigned char)link_ok;
1310		lc->speed = speed < 0 ? SPEED_INVALID : speed;
1311		lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1312		t3_os_link_fault(adapter, port_id, link_ok);
1313	}
1314}
1315
1316/**
1317 *	t3_link_start - apply link configuration to MAC/PHY
1318 *	@phy: the PHY to setup
1319 *	@mac: the MAC to setup
1320 *	@lc: the requested link configuration
1321 *
1322 *	Set up a port's MAC and PHY according to a desired link configuration.
1323 *	- If the PHY can auto-negotiate first decide what to advertise, then
1324 *	  enable/disable auto-negotiation as desired, and reset.
1325 *	- If the PHY does not auto-negotiate just reset it.
1326 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1327 *	  otherwise do it later based on the outcome of auto-negotiation.
1328 */
1329int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1330{
1331	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1332
1333	lc->link_ok = 0;
1334	if (lc->supported & SUPPORTED_Autoneg) {
1335		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1336		if (fc) {
1337			lc->advertising |= ADVERTISED_Asym_Pause;
1338			if (fc & PAUSE_RX)
1339				lc->advertising |= ADVERTISED_Pause;
1340		}
1341		phy->ops->advertise(phy, lc->advertising);
1342
1343		if (lc->autoneg == AUTONEG_DISABLE) {
1344			lc->speed = lc->requested_speed;
1345			lc->duplex = lc->requested_duplex;
1346			lc->fc = (unsigned char)fc;
1347			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1348						   fc);
1349			/* Also disables autoneg */
1350			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1351		} else
1352			phy->ops->autoneg_enable(phy);
1353	} else {
1354		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1355		lc->fc = (unsigned char)fc;
1356		phy->ops->reset(phy, 0);
1357	}
1358	return 0;
1359}
1360
1361/**
1362 *	t3_set_vlan_accel - control HW VLAN extraction
1363 *	@adapter: the adapter
1364 *	@ports: bitmap of adapter ports to operate on
1365 *	@on: enable (1) or disable (0) HW VLAN extraction
1366 *
1367 *	Enables or disables HW extraction of VLAN tags for the given port.
1368 */
1369void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1370{
1371	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1372			 ports << S_VLANEXTRACTIONENABLE,
1373			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1374}
1375
1376struct intr_info {
1377	unsigned int mask;	/* bits to check in interrupt status */
1378	const char *msg;	/* message to print or NULL */
1379	short stat_idx;		/* stat counter to increment or -1 */
1380	unsigned short fatal;	/* whether the condition reported is fatal */
1381};
1382
1383/**
1384 *	t3_handle_intr_status - table driven interrupt handler
1385 *	@adapter: the adapter that generated the interrupt
1386 *	@reg: the interrupt status register to process
1387 *	@mask: a mask to apply to the interrupt status
1388 *	@acts: table of interrupt actions
1389 *	@stats: statistics counters tracking interrupt occurrences
1390 *
1391 *	A table driven interrupt handler that applies a set of masks to an
1392 *	interrupt status word and performs the corresponding actions if the
1393 *	interrupts described by the mask have occurred.  The actions include
1394 *	optionally printing a warning or alert message, and optionally
1395 *	incrementing a stat counter.  The table is terminated by an entry
1396 *	specifying mask 0.  Returns the number of fatal interrupt conditions.
1397 */
1398static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1399				 unsigned int mask,
1400				 const struct intr_info *acts,
1401				 unsigned long *stats)
1402{
1403	int fatal = 0;
1404	unsigned int status = t3_read_reg(adapter, reg) & mask;
1405
1406	for (; acts->mask; ++acts) {
1407		if (!(status & acts->mask))
1408			continue;
1409		if (acts->fatal) {
1410			fatal++;
1411			CH_ALERT(adapter, "%s (0x%x)\n",
1412				 acts->msg, status & acts->mask);
1413			status &= ~acts->mask;
1414		} else if (acts->msg)
1415			CH_WARN(adapter, "%s (0x%x)\n",
1416				acts->msg, status & acts->mask);
1417		if (acts->stat_idx >= 0)
1418			stats[acts->stat_idx]++;
1419	}
1420	if (status)		/* clear processed interrupts */
1421		t3_write_reg(adapter, reg, status);
1422	return fatal;
1423}
1424
1425#define SGE_INTR_MASK (F_RSPQDISABLED | \
1426		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
1427		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
1428		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
1429		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
1430		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
1431		       F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \
1432		       F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \
1433		       F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \
1434		       F_LOPIODRBDROPERR)
1435#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1436		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1437		       F_NFASRCHFAIL)
1438#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1439#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1440		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1441		       F_TXFIFO_UNDERRUN)
1442#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1443			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1444			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1445			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1446			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1447			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1448#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1449			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1450			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1451			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
1452			F_TXPARERR | V_BISTERR(M_BISTERR))
1453#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
1454			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
1455			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
1456#define ULPTX_INTR_MASK 0xfc
1457#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
1458			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1459			 F_ZERO_SWITCH_ERROR)
1460#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1461		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1462		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1463	 	       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
1464		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
1465		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
1466		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
1467		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
1468#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1469			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1470			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1471#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1472			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1473			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1474#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1475		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1476		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1477		       V_MCAPARERRENB(M_MCAPARERRENB))
1478#define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
1479#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1480		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1481		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1482		      F_MPS0 | F_CPL_SWITCH)
1483/*
1484 * Interrupt handler for the PCIX1 module.
1485 */
1486static void pci_intr_handler(struct adapter *adapter)
1487{
1488	static const struct intr_info pcix1_intr_info[] = {
1489		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
1490		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
1491		{F_RCVTARABT, "PCI received target abort", -1, 1},
1492		{F_RCVMSTABT, "PCI received master abort", -1, 1},
1493		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
1494		{F_DETPARERR, "PCI detected parity error", -1, 1},
1495		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
1496		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
1497		{F_RCVSPLCMPERR, "PCI received split completion error", -1,
1498		 1},
1499		{F_DETCORECCERR, "PCI correctable ECC error",
1500		 STAT_PCI_CORR_ECC, 0},
1501		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
1502		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1503		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1504		 1},
1505		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1506		 1},
1507		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1508		 1},
1509		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1510		 "error", -1, 1},
1511		{0}
1512	};
1513
1514	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1515				  pcix1_intr_info, adapter->irq_stats))
1516		t3_fatal_err(adapter);
1517}
1518
1519/*
1520 * Interrupt handler for the PCIE module.
1521 */
1522static void pcie_intr_handler(struct adapter *adapter)
1523{
1524	static const struct intr_info pcie_intr_info[] = {
1525		{F_PEXERR, "PCI PEX error", -1, 1},
1526		{F_UNXSPLCPLERRR,
1527		 "PCI unexpected split completion DMA read error", -1, 1},
1528		{F_UNXSPLCPLERRC,
1529		 "PCI unexpected split completion DMA command error", -1, 1},
1530		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1531		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
1532		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
1533		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
1534		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1535		 "PCI MSI-X table/PBA parity error", -1, 1},
1536		{F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
1537		{F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
1538		{F_RXPARERR, "PCI Rx parity error", -1, 1},
1539		{F_TXPARERR, "PCI Tx parity error", -1, 1},
1540		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
1541		{0}
1542	};
1543
1544	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
1545		CH_ALERT(adapter, "PEX error code 0x%x\n",
1546			 t3_read_reg(adapter, A_PCIE_PEX_ERR));
1547
1548	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1549				  pcie_intr_info, adapter->irq_stats))
1550		t3_fatal_err(adapter);
1551}
1552
1553/*
1554 * TP interrupt handler.
1555 */
1556static void tp_intr_handler(struct adapter *adapter)
1557{
1558	static const struct intr_info tp_intr_info[] = {
1559		{0xffffff, "TP parity error", -1, 1},
1560		{0x1000000, "TP out of Rx pages", -1, 1},
1561		{0x2000000, "TP out of Tx pages", -1, 1},
1562		{0}
1563	};
1564
1565	static const struct intr_info tp_intr_info_t3c[] = {
1566		{0x1fffffff, "TP parity error", -1, 1},
1567		{F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
1568		{F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
1569		{0}
1570	};
1571
1572	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1573				  adapter->params.rev < T3_REV_C ?
1574				  tp_intr_info : tp_intr_info_t3c, NULL))
1575		t3_fatal_err(adapter);
1576}
1577
1578/*
1579 * CIM interrupt handler.
1580 */
1581static void cim_intr_handler(struct adapter *adapter)
1582{
1583	static const struct intr_info cim_intr_info[] = {
1584		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
1585		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
1586		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
1587		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
1588		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
1589		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
1590		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
1591		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
1592		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
1593		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
1594		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
1595		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
1596		{F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
1597		{F_ICACHEPARERR, "CIM icache parity error", -1, 1},
1598		{F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
1599		{F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
1600		{F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
1601		{F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
1602		{F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
1603		{F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
1604		{F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
1605		{F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
1606		{F_ITAGPARERR, "CIM itag parity error", -1, 1},
1607		{F_DTAGPARERR, "CIM dtag parity error", -1, 1},
1608		{0}
1609	};
1610
1611	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
1612				  cim_intr_info, NULL))
1613		t3_fatal_err(adapter);
1614}
1615
1616/*
1617 * ULP RX interrupt handler.
1618 */
1619static void ulprx_intr_handler(struct adapter *adapter)
1620{
1621	static const struct intr_info ulprx_intr_info[] = {
1622		{F_PARERRDATA, "ULP RX data parity error", -1, 1},
1623		{F_PARERRPCMD, "ULP RX command parity error", -1, 1},
1624		{F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
1625		{F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
1626		{F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
1627		{F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
1628		{F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
1629		{F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
1630		{0}
1631	};
1632
1633	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1634				  ulprx_intr_info, NULL))
1635		t3_fatal_err(adapter);
1636}
1637
1638/*
1639 * ULP TX interrupt handler.
1640 */
1641static void ulptx_intr_handler(struct adapter *adapter)
1642{
1643	static const struct intr_info ulptx_intr_info[] = {
1644		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1645		 STAT_ULP_CH0_PBL_OOB, 0},
1646		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1647		 STAT_ULP_CH1_PBL_OOB, 0},
1648		{0xfc, "ULP TX parity error", -1, 1},
1649		{0}
1650	};
1651
1652	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1653				  ulptx_intr_info, adapter->irq_stats))
1654		t3_fatal_err(adapter);
1655}
1656
1657#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1658	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1659	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1660	F_ICSPI1_TX_FRAMING_ERROR)
1661#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1662	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1663	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1664	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1665
1666/*
1667 * PM TX interrupt handler.
1668 */
1669static void pmtx_intr_handler(struct adapter *adapter)
1670{
1671	static const struct intr_info pmtx_intr_info[] = {
1672		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
1673		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
1674		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
1675		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1676		 "PMTX ispi parity error", -1, 1},
1677		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1678		 "PMTX ospi parity error", -1, 1},
1679		{0}
1680	};
1681
1682	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1683				  pmtx_intr_info, NULL))
1684		t3_fatal_err(adapter);
1685}
1686
1687#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1688	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1689	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1690	F_IESPI1_TX_FRAMING_ERROR)
1691#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1692	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1693	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1694	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1695
1696/*
1697 * PM RX interrupt handler.
1698 */
1699static void pmrx_intr_handler(struct adapter *adapter)
1700{
1701	static const struct intr_info pmrx_intr_info[] = {
1702		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
1703		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
1704		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
1705		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1706		 "PMRX ispi parity error", -1, 1},
1707		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1708		 "PMRX ospi parity error", -1, 1},
1709		{0}
1710	};
1711
1712	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1713				  pmrx_intr_info, NULL))
1714		t3_fatal_err(adapter);
1715}
1716
1717/*
1718 * CPL switch interrupt handler.
1719 */
1720static void cplsw_intr_handler(struct adapter *adapter)
1721{
1722	static const struct intr_info cplsw_intr_info[] = {
1723		{F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
1724		{F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
1725		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
1726		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
1727		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
1728		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
1729		{0}
1730	};
1731
1732	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1733				  cplsw_intr_info, NULL))
1734		t3_fatal_err(adapter);
1735}
1736
1737/*
1738 * MPS interrupt handler.
1739 */
1740static void mps_intr_handler(struct adapter *adapter)
1741{
1742	static const struct intr_info mps_intr_info[] = {
1743		{0x1ff, "MPS parity error", -1, 1},
1744		{0}
1745	};
1746
1747	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1748				  mps_intr_info, NULL))
1749		t3_fatal_err(adapter);
1750}
1751
1752#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1753
1754/*
1755 * MC7 interrupt handler.
1756 */
1757static void mc7_intr_handler(struct mc7 *mc7)
1758{
1759	struct adapter *adapter = mc7->adapter;
1760	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
1761
1762	if (cause & F_CE) {
1763		mc7->stats.corr_err++;
1764		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1765			"data 0x%x 0x%x 0x%x\n", mc7->name,
1766			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1767			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1768			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1769			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
1770	}
1771
1772	if (cause & F_UE) {
1773		mc7->stats.uncorr_err++;
1774		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1775			 "data 0x%x 0x%x 0x%x\n", mc7->name,
1776			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1777			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1778			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1779			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
1780	}
1781
1782	if (G_PE(cause)) {
1783		mc7->stats.parity_err++;
1784		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1785			 mc7->name, G_PE(cause));
1786	}
1787
1788	if (cause & F_AE) {
1789		u32 addr = 0;
1790
1791		if (adapter->params.rev > 0)
1792			addr = t3_read_reg(adapter,
1793					   mc7->offset + A_MC7_ERR_ADDR);
1794		mc7->stats.addr_err++;
1795		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1796			 mc7->name, addr);
1797	}
1798
1799	if (cause & MC7_INTR_FATAL)
1800		t3_fatal_err(adapter);
1801
1802	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1803}
1804
1805#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1806			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1807/*
1808 * XGMAC interrupt handler.
1809 */
1810static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1811{
1812	struct cmac *mac = &adap2pinfo(adap, idx)->mac;
1813	/*
1814	 * We mask out interrupt causes for which we're not taking interrupts.
1815	 * This allows us to use polling logic to monitor some of the other
1816	 * conditions when taking interrupts would impose too much load on the
1817	 * system.
1818	 */
1819	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) &
1820		    ~F_RXFIFO_OVERFLOW;
1821
1822	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1823		mac->stats.tx_fifo_parity_err++;
1824		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1825	}
1826	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1827		mac->stats.rx_fifo_parity_err++;
1828		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1829	}
1830	if (cause & F_TXFIFO_UNDERRUN)
1831		mac->stats.tx_fifo_urun++;
1832	if (cause & F_RXFIFO_OVERFLOW)
1833		mac->stats.rx_fifo_ovfl++;
1834	if (cause & V_SERDES_LOS(M_SERDES_LOS))
1835		mac->stats.serdes_signal_loss++;
1836	if (cause & F_XAUIPCSCTCERR)
1837		mac->stats.xaui_pcs_ctc_err++;
1838	if (cause & F_XAUIPCSALIGNCHANGE)
1839		mac->stats.xaui_pcs_align_change++;
1840	if (cause & F_XGM_INT) {
1841		t3_set_reg_field(adap,
1842				 A_XGM_INT_ENABLE + mac->offset,
1843				 F_XGM_INT, 0);
1844		mac->stats.link_faults++;
1845
1846		t3_os_link_fault_handler(adap, idx);
1847	}
1848
1849	if (cause & XGM_INTR_FATAL)
1850		t3_fatal_err(adap);
1851
1852	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1853	return cause != 0;
1854}
1855
1856/*
1857 * Interrupt handler for PHY events.
1858 */
1859int t3_phy_intr_handler(struct adapter *adapter)
1860{
1861	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1862
1863	for_each_port(adapter, i) {
1864		struct port_info *p = adap2pinfo(adapter, i);
1865
1866		if (!(p->phy.caps & SUPPORTED_IRQ))
1867			continue;
1868
1869		if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
1870			int phy_cause = p->phy.ops->intr_handler(&p->phy);
1871
1872			if (phy_cause & cphy_cause_link_change)
1873				t3_link_changed(adapter, i);
1874			if (phy_cause & cphy_cause_fifo_error)
1875				p->phy.fifo_errors++;
1876			if (phy_cause & cphy_cause_module_change)
1877				t3_os_phymod_changed(adapter, i);
1878		}
1879	}
1880
1881	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1882	return 0;
1883}
1884
1885/*
1886 * T3 slow path (non-data) interrupt handler.
1887 */
1888int t3_slow_intr_handler(struct adapter *adapter)
1889{
1890	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1891
1892	cause &= adapter->slow_intr_mask;
1893	if (!cause)
1894		return 0;
1895	if (cause & F_PCIM0) {
1896		if (is_pcie(adapter))
1897			pcie_intr_handler(adapter);
1898		else
1899			pci_intr_handler(adapter);
1900	}
1901	if (cause & F_SGE3)
1902		t3_sge_err_intr_handler(adapter);
1903	if (cause & F_MC7_PMRX)
1904		mc7_intr_handler(&adapter->pmrx);
1905	if (cause & F_MC7_PMTX)
1906		mc7_intr_handler(&adapter->pmtx);
1907	if (cause & F_MC7_CM)
1908		mc7_intr_handler(&adapter->cm);
1909	if (cause & F_CIM)
1910		cim_intr_handler(adapter);
1911	if (cause & F_TP1)
1912		tp_intr_handler(adapter);
1913	if (cause & F_ULP2_RX)
1914		ulprx_intr_handler(adapter);
1915	if (cause & F_ULP2_TX)
1916		ulptx_intr_handler(adapter);
1917	if (cause & F_PM1_RX)
1918		pmrx_intr_handler(adapter);
1919	if (cause & F_PM1_TX)
1920		pmtx_intr_handler(adapter);
1921	if (cause & F_CPL_SWITCH)
1922		cplsw_intr_handler(adapter);
1923	if (cause & F_MPS0)
1924		mps_intr_handler(adapter);
1925	if (cause & F_MC5A)
1926		t3_mc5_intr_handler(&adapter->mc5);
1927	if (cause & F_XGMAC0_0)
1928		mac_intr_handler(adapter, 0);
1929	if (cause & F_XGMAC0_1)
1930		mac_intr_handler(adapter, 1);
1931	if (cause & F_T3DBG)
1932		t3_os_ext_intr_handler(adapter);
1933
1934	/* Clear the interrupts just processed. */
1935	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1936	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
1937	return 1;
1938}
1939
1940static unsigned int calc_gpio_intr(struct adapter *adap)
1941{
1942	unsigned int i, gpi_intr = 0;
1943
1944	for_each_port(adap, i)
1945		if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
1946		    adapter_info(adap)->gpio_intr[i])
1947			gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
1948	return gpi_intr;
1949}
1950
1951/**
1952 *	t3_intr_enable - enable interrupts
1953 *	@adapter: the adapter whose interrupts should be enabled
1954 *
1955 *	Enable interrupts by setting the interrupt enable registers of the
1956 *	various HW modules and then enabling the top-level interrupt
1957 *	concentrator.
1958 */
1959void t3_intr_enable(struct adapter *adapter)
1960{
1961	static const struct addr_val_pair intr_en_avp[] = {
1962		{A_SG_INT_ENABLE, SGE_INTR_MASK},
1963		{A_MC7_INT_ENABLE, MC7_INTR_MASK},
1964		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1965		 MC7_INTR_MASK},
1966		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1967		 MC7_INTR_MASK},
1968		{A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1969		{A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1970		{A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1971		{A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1972		{A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1973		{A_MPS_INT_ENABLE, MPS_INTR_MASK},
1974	};
1975
1976	adapter->slow_intr_mask = PL_INTR_MASK;
1977
1978	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1979	t3_write_reg(adapter, A_TP_INT_ENABLE,
1980		     adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
1981
1982	if (adapter->params.rev > 0) {
1983		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1984			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1985		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1986			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1987			     F_PBL_BOUND_ERR_CH1);
1988	} else {
1989		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1990		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1991	}
1992
1993	t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
1994
1995	if (is_pcie(adapter))
1996		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1997	else
1998		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1999	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
2000	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
2001}
2002
2003/**
2004 *	t3_intr_disable - disable a card's interrupts
2005 *	@adapter: the adapter whose interrupts should be disabled
2006 *
2007 *	Disable interrupts.  We only disable the top-level interrupt
2008 *	concentrator and the SGE data interrupts.
2009 */
2010void t3_intr_disable(struct adapter *adapter)
2011{
2012	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
2013	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
2014	adapter->slow_intr_mask = 0;
2015}
2016
2017/**
2018 *	t3_intr_clear - clear all interrupts
2019 *	@adapter: the adapter whose interrupts should be cleared
2020 *
2021 *	Clears all interrupts.
2022 */
2023void t3_intr_clear(struct adapter *adapter)
2024{
2025	static const unsigned int cause_reg_addr[] = {
2026		A_SG_INT_CAUSE,
2027		A_SG_RSPQ_FL_STATUS,
2028		A_PCIX_INT_CAUSE,
2029		A_MC7_INT_CAUSE,
2030		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
2031		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
2032		A_CIM_HOST_INT_CAUSE,
2033		A_TP_INT_CAUSE,
2034		A_MC5_DB_INT_CAUSE,
2035		A_ULPRX_INT_CAUSE,
2036		A_ULPTX_INT_CAUSE,
2037		A_CPL_INTR_CAUSE,
2038		A_PM1_TX_INT_CAUSE,
2039		A_PM1_RX_INT_CAUSE,
2040		A_MPS_INT_CAUSE,
2041		A_T3DBG_INT_CAUSE,
2042	};
2043	unsigned int i;
2044
2045	/* Clear PHY and MAC interrupts for each port. */
2046	for_each_port(adapter, i)
2047	    t3_port_intr_clear(adapter, i);
2048
2049	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
2050		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
2051
2052	if (is_pcie(adapter))
2053		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
2054	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
2055	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
2056}
2057
2058void t3_xgm_intr_enable(struct adapter *adapter, int idx)
2059{
2060	struct port_info *pi = adap2pinfo(adapter, idx);
2061
2062	t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
2063		     XGM_EXTRA_INTR_MASK);
2064}
2065
2066void t3_xgm_intr_disable(struct adapter *adapter, int idx)
2067{
2068	struct port_info *pi = adap2pinfo(adapter, idx);
2069
2070	t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
2071		     0x7ff);
2072}
2073
2074/**
2075 *	t3_port_intr_enable - enable port-specific interrupts
2076 *	@adapter: associated adapter
2077 *	@idx: index of port whose interrupts should be enabled
2078 *
2079 *	Enable port-specific (i.e., MAC and PHY) interrupts for the given
2080 *	adapter port.
2081 */
2082void t3_port_intr_enable(struct adapter *adapter, int idx)
2083{
2084	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2085
2086	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
2087	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
2088	phy->ops->intr_enable(phy);
2089}
2090
2091/**
2092 *	t3_port_intr_disable - disable port-specific interrupts
2093 *	@adapter: associated adapter
2094 *	@idx: index of port whose interrupts should be disabled
2095 *
2096 *	Disable port-specific (i.e., MAC and PHY) interrupts for the given
2097 *	adapter port.
2098 */
2099void t3_port_intr_disable(struct adapter *adapter, int idx)
2100{
2101	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2102
2103	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
2104	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
2105	phy->ops->intr_disable(phy);
2106}
2107
2108/**
2109 *	t3_port_intr_clear - clear port-specific interrupts
2110 *	@adapter: associated adapter
2111 *	@idx: index of port whose interrupts to clear
2112 *
2113 *	Clear port-specific (i.e., MAC and PHY) interrupts for the given
2114 *	adapter port.
2115 */
2116static void t3_port_intr_clear(struct adapter *adapter, int idx)
2117{
2118	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2119
2120	t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
2121	t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
2122	phy->ops->intr_clear(phy);
2123}
2124
2125#define SG_CONTEXT_CMD_ATTEMPTS 100
2126
2127/**
2128 * 	t3_sge_write_context - write an SGE context
2129 * 	@adapter: the adapter
2130 * 	@id: the context id
2131 * 	@type: the context type
2132 *
2133 * 	Program an SGE context with the values already loaded in the
2134 * 	CONTEXT_DATA? registers.
2135 */
2136static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
2137				unsigned int type)
2138{
2139	if (type == F_RESPONSEQ) {
2140		/*
2141		 * Can't write the Response Queue Context bits for
2142		 * Interrupt Armed or the Reserve bits after the chip
2143		 * has been initialized out of reset.  Writing to these
2144		 * bits can confuse the hardware.
2145		 */
2146		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2147		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2148		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
2149		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2150	} else {
2151		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2152		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2153		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
2154		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2155	}
2156	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2157		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2158	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2159			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2160}
2161
2162/**
2163 *	clear_sge_ctxt - completely clear an SGE context
2164 *	@adapter: the adapter
2165 *	@id: the context id
2166 *	@type: the context type
2167 *
2168 *	Completely clear an SGE context.  Used predominantly at post-reset
2169 *	initialization.  Note in particular that we don't skip writing to any
2170 *	"sensitive bits" in the contexts the way that t3_sge_write_context()
2171 *	does ...
2172 */
2173static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
2174			  unsigned int type)
2175{
2176	t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
2177	t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2178	t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2179	t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
2180	t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
2181	t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
2182	t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
2183	t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
2184	t3_write_reg(adap, A_SG_CONTEXT_CMD,
2185		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2186	return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2187			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2188}
2189
2190/**
2191 *	t3_sge_init_ecntxt - initialize an SGE egress context
2192 *	@adapter: the adapter to configure
2193 *	@id: the context id
2194 *	@gts_enable: whether to enable GTS for the context
2195 *	@type: the egress context type
2196 *	@respq: associated response queue
2197 *	@base_addr: base address of queue
2198 *	@size: number of queue entries
2199 *	@token: uP token
2200 *	@gen: initial generation value for the context
2201 *	@cidx: consumer pointer
2202 *
2203 *	Initialize an SGE egress context and make it ready for use.  If the
2204 *	platform allows concurrent context operations, the caller is
2205 *	responsible for appropriate locking.
2206 */
2207int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
2208		       enum sge_context_type type, int respq, u64 base_addr,
2209		       unsigned int size, unsigned int token, int gen,
2210		       unsigned int cidx)
2211{
2212	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
2213
2214	if (base_addr & 0xfff)	/* must be 4K aligned */
2215		return -EINVAL;
2216	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2217		return -EBUSY;
2218
2219	base_addr >>= 12;
2220	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
2221		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
2222	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
2223		     V_EC_BASE_LO(base_addr & 0xffff));
2224	base_addr >>= 16;
2225	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
2226	base_addr >>= 32;
2227	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2228		     V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
2229		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2230		     F_EC_VALID);
2231	return t3_sge_write_context(adapter, id, F_EGRESS);
2232}
2233
2234/**
2235 *	t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2236 *	@adapter: the adapter to configure
2237 *	@id: the context id
2238 *	@gts_enable: whether to enable GTS for the context
2239 *	@base_addr: base address of queue
2240 *	@size: number of queue entries
2241 *	@bsize: size of each buffer for this queue
2242 *	@cong_thres: threshold to signal congestion to upstream producers
2243 *	@gen: initial generation value for the context
2244 *	@cidx: consumer pointer
2245 *
2246 *	Initialize an SGE free list context and make it ready for use.  The
2247 *	caller is responsible for ensuring only one context operation occurs
2248 *	at a time.
2249 */
2250int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
2251			int gts_enable, u64 base_addr, unsigned int size,
2252			unsigned int bsize, unsigned int cong_thres, int gen,
2253			unsigned int cidx)
2254{
2255	if (base_addr & 0xfff)	/* must be 4K aligned */
2256		return -EINVAL;
2257	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2258		return -EBUSY;
2259
2260	base_addr >>= 12;
2261	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
2262	base_addr >>= 32;
2263	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2264		     V_FL_BASE_HI((u32) base_addr) |
2265		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2266	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2267		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2268		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2269	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2270		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2271		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2272	return t3_sge_write_context(adapter, id, F_FREELIST);
2273}
2274
2275/**
2276 *	t3_sge_init_rspcntxt - initialize an SGE response queue context
2277 *	@adapter: the adapter to configure
2278 *	@id: the context id
2279 *	@irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2280 *	@base_addr: base address of queue
2281 *	@size: number of queue entries
2282 *	@fl_thres: threshold for selecting the normal or jumbo free list
2283 *	@gen: initial generation value for the context
2284 *	@cidx: consumer pointer
2285 *
2286 *	Initialize an SGE response queue context and make it ready for use.
2287 *	The caller is responsible for ensuring only one context operation
2288 *	occurs at a time.
2289 */
2290int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
2291			 int irq_vec_idx, u64 base_addr, unsigned int size,
2292			 unsigned int fl_thres, int gen, unsigned int cidx)
2293{
2294	unsigned int intr = 0;
2295
2296	if (base_addr & 0xfff)	/* must be 4K aligned */
2297		return -EINVAL;
2298	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2299		return -EBUSY;
2300
2301	base_addr >>= 12;
2302	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2303		     V_CQ_INDEX(cidx));
2304	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2305	base_addr >>= 32;
2306	if (irq_vec_idx >= 0)
2307		intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2308	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2309		     V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
2310	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2311	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2312}
2313
2314/**
2315 *	t3_sge_init_cqcntxt - initialize an SGE completion queue context
2316 *	@adapter: the adapter to configure
2317 *	@id: the context id
2318 *	@base_addr: base address of queue
2319 *	@size: number of queue entries
2320 *	@rspq: response queue for async notifications
2321 *	@ovfl_mode: CQ overflow mode
2322 *	@credits: completion queue credits
2323 *	@credit_thres: the credit threshold
2324 *
2325 *	Initialize an SGE completion queue context and make it ready for use.
2326 *	The caller is responsible for ensuring only one context operation
2327 *	occurs at a time.
2328 */
2329int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
2330			unsigned int size, int rspq, int ovfl_mode,
2331			unsigned int credits, unsigned int credit_thres)
2332{
2333	if (base_addr & 0xfff)	/* must be 4K aligned */
2334		return -EINVAL;
2335	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2336		return -EBUSY;
2337
2338	base_addr >>= 12;
2339	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2340	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2341	base_addr >>= 32;
2342	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2343		     V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
2344		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2345		     V_CQ_ERR(ovfl_mode));
2346	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2347		     V_CQ_CREDIT_THRES(credit_thres));
2348	return t3_sge_write_context(adapter, id, F_CQ);
2349}
2350
2351/**
2352 *	t3_sge_enable_ecntxt - enable/disable an SGE egress context
2353 *	@adapter: the adapter
2354 *	@id: the egress context id
2355 *	@enable: enable (1) or disable (0) the context
2356 *
2357 *	Enable or disable an SGE egress context.  The caller is responsible for
2358 *	ensuring only one context operation occurs at a time.
2359 */
2360int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
2361{
2362	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2363		return -EBUSY;
2364
2365	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2366	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2367	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2368	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2369	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2370	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2371		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2372	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2373			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2374}
2375
2376/**
2377 *	t3_sge_disable_fl - disable an SGE free-buffer list
2378 *	@adapter: the adapter
2379 *	@id: the free list context id
2380 *
2381 *	Disable an SGE free-buffer list.  The caller is responsible for
2382 *	ensuring only one context operation occurs at a time.
2383 */
2384int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
2385{
2386	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2387		return -EBUSY;
2388
2389	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2390	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2391	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2392	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2393	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2394	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2395		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2396	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2397			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2398}
2399
2400/**
2401 *	t3_sge_disable_rspcntxt - disable an SGE response queue
2402 *	@adapter: the adapter
2403 *	@id: the response queue context id
2404 *
2405 *	Disable an SGE response queue.  The caller is responsible for
2406 *	ensuring only one context operation occurs at a time.
2407 */
2408int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
2409{
2410	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2411		return -EBUSY;
2412
2413	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2414	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2415	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2416	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2417	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2418	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2419		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2420	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2421			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2422}
2423
2424/**
2425 *	t3_sge_disable_cqcntxt - disable an SGE completion queue
2426 *	@adapter: the adapter
2427 *	@id: the completion queue context id
2428 *
2429 *	Disable an SGE completion queue.  The caller is responsible for
2430 *	ensuring only one context operation occurs at a time.
2431 */
2432int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2433{
2434	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2435		return -EBUSY;
2436
2437	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2438	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2439	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2440	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2441	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2442	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2443		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2444	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2445			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2446}
2447
2448/**
2449 *	t3_sge_cqcntxt_op - perform an operation on a completion queue context
2450 *	@adapter: the adapter
2451 *	@id: the context id
2452 *	@op: the operation to perform
2453 *
2454 *	Perform the selected operation on an SGE completion queue context.
2455 *	The caller is responsible for ensuring only one context operation
2456 *	occurs at a time.
2457 */
2458int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2459		      unsigned int credits)
2460{
2461	u32 val;
2462
2463	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2464		return -EBUSY;
2465
2466	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2467	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2468		     V_CONTEXT(id) | F_CQ);
2469	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2470				0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2471		return -EIO;
2472
2473	if (op >= 2 && op < 7) {
2474		if (adapter->params.rev > 0)
2475			return G_CQ_INDEX(val);
2476
2477		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2478			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2479		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2480				    F_CONTEXT_CMD_BUSY, 0,
2481				    SG_CONTEXT_CMD_ATTEMPTS, 1))
2482			return -EIO;
2483		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2484	}
2485	return 0;
2486}
2487
2488/**
2489 *	t3_config_rss - configure Rx packet steering
2490 *	@adapter: the adapter
2491 *	@rss_config: RSS settings (written to TP_RSS_CONFIG)
2492 *	@cpus: values for the CPU lookup table (0xff terminated)
2493 *	@rspq: values for the response queue lookup table (0xffff terminated)
2494 *
2495 *	Programs the receive packet steering logic.  @cpus and @rspq provide
2496 *	the values for the CPU and response queue lookup tables.  If they
2497 *	provide fewer values than the size of the tables the supplied values
2498 *	are used repeatedly until the tables are fully populated.
2499 */
2500void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2501		   const u8 * cpus, const u16 *rspq)
2502{
2503	int i, j, cpu_idx = 0, q_idx = 0;
2504
2505	if (cpus)
2506		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2507			u32 val = i << 16;
2508
2509			for (j = 0; j < 2; ++j) {
2510				val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2511				if (cpus[cpu_idx] == 0xff)
2512					cpu_idx = 0;
2513			}
2514			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2515		}
2516
2517	if (rspq)
2518		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2519			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2520				     (i << 16) | rspq[q_idx++]);
2521			if (rspq[q_idx] == 0xffff)
2522				q_idx = 0;
2523		}
2524
2525	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2526}
2527
2528/**
2529 *	t3_tp_set_offload_mode - put TP in NIC/offload mode
2530 *	@adap: the adapter
2531 *	@enable: 1 to select offload mode, 0 for regular NIC
2532 *
2533 *	Switches TP to NIC/offload mode.
2534 */
2535void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2536{
2537	if (is_offload(adap) || !enable)
2538		t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2539				 V_NICMODE(!enable));
2540}
2541
2542/**
2543 *	pm_num_pages - calculate the number of pages of the payload memory
2544 *	@mem_size: the size of the payload memory
2545 *	@pg_size: the size of each payload memory page
2546 *
2547 *	Calculate the number of pages, each of the given size, that fit in a
2548 *	memory of the specified size, respecting the HW requirement that the
2549 *	number of pages must be a multiple of 24.
2550 */
2551static inline unsigned int pm_num_pages(unsigned int mem_size,
2552					unsigned int pg_size)
2553{
2554	unsigned int n = mem_size / pg_size;
2555
2556	return n - n % 24;
2557}
2558
2559#define mem_region(adap, start, size, reg) \
2560	t3_write_reg((adap), A_ ## reg, (start)); \
2561	start += size
2562
2563/**
2564 *	partition_mem - partition memory and configure TP memory settings
2565 *	@adap: the adapter
2566 *	@p: the TP parameters
2567 *
2568 *	Partitions context and payload memory and configures TP's memory
2569 *	registers.
2570 */
2571static void partition_mem(struct adapter *adap, const struct tp_params *p)
2572{
2573	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2574	unsigned int timers = 0, timers_shift = 22;
2575
2576	if (adap->params.rev > 0) {
2577		if (tids <= 16 * 1024) {
2578			timers = 1;
2579			timers_shift = 16;
2580		} else if (tids <= 64 * 1024) {
2581			timers = 2;
2582			timers_shift = 18;
2583		} else if (tids <= 256 * 1024) {
2584			timers = 3;
2585			timers_shift = 20;
2586		}
2587	}
2588
2589	t3_write_reg(adap, A_TP_PMM_SIZE,
2590		     p->chan_rx_size | (p->chan_tx_size >> 16));
2591
2592	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2593	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2594	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2595	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2596			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2597
2598	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2599	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2600	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2601
2602	pstructs = p->rx_num_pgs + p->tx_num_pgs;
2603	/* Add a bit of headroom and make multiple of 24 */
2604	pstructs += 48;
2605	pstructs -= pstructs % 24;
2606	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2607
2608	m = tids * TCB_SIZE;
2609	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2610	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2611	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2612	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2613	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2614	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2615	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2616	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2617
2618	m = (m + 4095) & ~0xfff;
2619	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2620	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2621
2622	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2623	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2624	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2625	if (tids < m)
2626		adap->params.mc5.nservers += m - tids;
2627}
2628
2629static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2630				  u32 val)
2631{
2632	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2633	t3_write_reg(adap, A_TP_PIO_DATA, val);
2634}
2635
2636static void tp_config(struct adapter *adap, const struct tp_params *p)
2637{
2638	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2639		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2640		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2641	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2642		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2643		     V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
2644	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2645		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2646		     V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
2647		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2648	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
2649			 F_IPV6ENABLE | F_NICMODE);
2650	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2651	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2652	t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2653			 adap->params.rev > 0 ? F_ENABLEESND :
2654			 F_T3A_ENABLEESND);
2655
2656	t3_set_reg_field(adap, A_TP_PC_CONFIG,
2657			 F_ENABLEEPCMDAFULL,
2658			 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
2659			 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2660	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
2661			 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
2662			 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
2663	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2664	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
2665
2666	if (adap->params.rev > 0) {
2667		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2668		t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2669				 F_TXPACEAUTO);
2670		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2671		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2672	} else
2673		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2674
2675	if (adap->params.rev == T3_REV_C)
2676		t3_set_reg_field(adap, A_TP_PC_CONFIG,
2677				 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
2678				 V_TABLELATENCYDELTA(4));
2679
2680	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2681	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2682	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2683	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2684}
2685
2686/* Desired TP timer resolution in usec */
2687#define TP_TMR_RES 50
2688
2689/* TCP timer values in ms */
2690#define TP_DACK_TIMER 50
2691#define TP_RTO_MIN    250
2692
2693/**
2694 *	tp_set_timers - set TP timing parameters
2695 *	@adap: the adapter to set
2696 *	@core_clk: the core clock frequency in Hz
2697 *
2698 *	Set TP's timing parameters, such as the various timer resolutions and
2699 *	the TCP timer values.
2700 */
2701static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2702{
2703	unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2704	unsigned int dack_re = fls(core_clk / 5000) - 1;	/* 200us */
2705	unsigned int tstamp_re = fls(core_clk / 1000);	/* 1ms, at least */
2706	unsigned int tps = core_clk >> tre;
2707
2708	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2709		     V_DELAYEDACKRESOLUTION(dack_re) |
2710		     V_TIMESTAMPRESOLUTION(tstamp_re));
2711	t3_write_reg(adap, A_TP_DACK_TIMER,
2712		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2713	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2714	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2715	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2716	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2717	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2718		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2719		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2720		     V_KEEPALIVEMAX(9));
2721
2722#define SECONDS * tps
2723
2724	t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2725	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2726	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2727	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2728	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2729	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2730	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2731	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2732	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2733
2734#undef SECONDS
2735}
2736
2737/**
2738 *	t3_tp_set_coalescing_size - set receive coalescing size
2739 *	@adap: the adapter
2740 *	@size: the receive coalescing size
2741 *	@psh: whether a set PSH bit should deliver coalesced data
2742 *
2743 *	Set the receive coalescing size and PSH bit handling.
2744 */
2745static int t3_tp_set_coalescing_size(struct adapter *adap,
2746				     unsigned int size, int psh)
2747{
2748	u32 val;
2749
2750	if (size > MAX_RX_COALESCING_LEN)
2751		return -EINVAL;
2752
2753	val = t3_read_reg(adap, A_TP_PARA_REG3);
2754	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2755
2756	if (size) {
2757		val |= F_RXCOALESCEENABLE;
2758		if (psh)
2759			val |= F_RXCOALESCEPSHEN;
2760		size = min(MAX_RX_COALESCING_LEN, size);
2761		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2762			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2763	}
2764	t3_write_reg(adap, A_TP_PARA_REG3, val);
2765	return 0;
2766}
2767
2768/**
2769 *	t3_tp_set_max_rxsize - set the max receive size
2770 *	@adap: the adapter
2771 *	@size: the max receive size
2772 *
2773 *	Set TP's max receive size.  This is the limit that applies when
2774 *	receive coalescing is disabled.
2775 */
2776static void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2777{
2778	t3_write_reg(adap, A_TP_PARA_REG7,
2779		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2780}
2781
2782static void init_mtus(unsigned short mtus[])
2783{
2784	/*
2785	 * See draft-mathis-plpmtud-00.txt for the values.  The min is 88 so
2786	 * it can accommodate max size TCP/IP headers when SACK and timestamps
2787	 * are enabled and still have at least 8 bytes of payload.
2788	 */
2789	mtus[0] = 88;
2790	mtus[1] = 88;
2791	mtus[2] = 256;
2792	mtus[3] = 512;
2793	mtus[4] = 576;
2794	mtus[5] = 1024;
2795	mtus[6] = 1280;
2796	mtus[7] = 1492;
2797	mtus[8] = 1500;
2798	mtus[9] = 2002;
2799	mtus[10] = 2048;
2800	mtus[11] = 4096;
2801	mtus[12] = 4352;
2802	mtus[13] = 8192;
2803	mtus[14] = 9000;
2804	mtus[15] = 9600;
2805}
2806
2807/*
2808 * Initial congestion control parameters.
2809 */
2810static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2811{
2812	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2813	a[9] = 2;
2814	a[10] = 3;
2815	a[11] = 4;
2816	a[12] = 5;
2817	a[13] = 6;
2818	a[14] = 7;
2819	a[15] = 8;
2820	a[16] = 9;
2821	a[17] = 10;
2822	a[18] = 14;
2823	a[19] = 17;
2824	a[20] = 21;
2825	a[21] = 25;
2826	a[22] = 30;
2827	a[23] = 35;
2828	a[24] = 45;
2829	a[25] = 60;
2830	a[26] = 80;
2831	a[27] = 100;
2832	a[28] = 200;
2833	a[29] = 300;
2834	a[30] = 400;
2835	a[31] = 500;
2836
2837	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2838	b[9] = b[10] = 1;
2839	b[11] = b[12] = 2;
2840	b[13] = b[14] = b[15] = b[16] = 3;
2841	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2842	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2843	b[28] = b[29] = 6;
2844	b[30] = b[31] = 7;
2845}
2846
2847/* The minimum additive increment value for the congestion control table */
2848#define CC_MIN_INCR 2U
2849
2850/**
2851 *	t3_load_mtus - write the MTU and congestion control HW tables
2852 *	@adap: the adapter
2853 *	@mtus: the unrestricted values for the MTU table
2854 *	@alphs: the values for the congestion control alpha parameter
2855 *	@beta: the values for the congestion control beta parameter
2856 *	@mtu_cap: the maximum permitted effective MTU
2857 *
2858 *	Write the MTU table with the supplied MTUs capping each at &mtu_cap.
2859 *	Update the high-speed congestion control table with the supplied alpha,
2860 * 	beta, and MTUs.
2861 */
2862void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2863		  unsigned short alpha[NCCTRL_WIN],
2864		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2865{
2866	static const unsigned int avg_pkts[NCCTRL_WIN] = {
2867		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2868		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2869		28672, 40960, 57344, 81920, 114688, 163840, 229376
2870	};
2871
2872	unsigned int i, w;
2873
2874	for (i = 0; i < NMTUS; ++i) {
2875		unsigned int mtu = min(mtus[i], mtu_cap);
2876		unsigned int log2 = fls(mtu);
2877
2878		if (!(mtu & ((1 << log2) >> 2)))	/* round */
2879			log2--;
2880		t3_write_reg(adap, A_TP_MTU_TABLE,
2881			     (i << 24) | (log2 << 16) | mtu);
2882
2883		for (w = 0; w < NCCTRL_WIN; ++w) {
2884			unsigned int inc;
2885
2886			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2887				  CC_MIN_INCR);
2888
2889			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2890				     (w << 16) | (beta[w] << 13) | inc);
2891		}
2892	}
2893}
2894
2895/**
2896 *	t3_tp_get_mib_stats - read TP's MIB counters
2897 *	@adap: the adapter
2898 *	@tps: holds the returned counter values
2899 *
2900 *	Returns the values of TP's MIB counters.
2901 */
2902void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2903{
2904	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2905			 sizeof(*tps) / sizeof(u32), 0);
2906}
2907
2908#define ulp_region(adap, name, start, len) \
2909	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2910	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2911		     (start) + (len) - 1); \
2912	start += len
2913
2914#define ulptx_region(adap, name, start, len) \
2915	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2916	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2917		     (start) + (len) - 1)
2918
2919static void ulp_config(struct adapter *adap, const struct tp_params *p)
2920{
2921	unsigned int m = p->chan_rx_size;
2922
2923	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2924	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2925	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2926	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2927	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2928	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2929	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2930	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2931}
2932
2933/**
2934 *	t3_set_proto_sram - set the contents of the protocol sram
2935 *	@adapter: the adapter
2936 *	@data: the protocol image
2937 *
2938 *	Write the contents of the protocol SRAM.
2939 */
2940int t3_set_proto_sram(struct adapter *adap, const u8 *data)
2941{
2942	int i;
2943	const __be32 *buf = (const __be32 *)data;
2944
2945	for (i = 0; i < PROTO_SRAM_LINES; i++) {
2946		t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
2947		t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
2948		t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
2949		t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
2950		t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
2951
2952		t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2953		if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
2954			return -EIO;
2955	}
2956	t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
2957
2958	return 0;
2959}
2960
2961void t3_config_trace_filter(struct adapter *adapter,
2962			    const struct trace_params *tp, int filter_index,
2963			    int invert, int enable)
2964{
2965	u32 addr, key[4], mask[4];
2966
2967	key[0] = tp->sport | (tp->sip << 16);
2968	key[1] = (tp->sip >> 16) | (tp->dport << 16);
2969	key[2] = tp->dip;
2970	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2971
2972	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2973	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2974	mask[2] = tp->dip_mask;
2975	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2976
2977	if (invert)
2978		key[3] |= (1 << 29);
2979	if (enable)
2980		key[3] |= (1 << 28);
2981
2982	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2983	tp_wr_indirect(adapter, addr++, key[0]);
2984	tp_wr_indirect(adapter, addr++, mask[0]);
2985	tp_wr_indirect(adapter, addr++, key[1]);
2986	tp_wr_indirect(adapter, addr++, mask[1]);
2987	tp_wr_indirect(adapter, addr++, key[2]);
2988	tp_wr_indirect(adapter, addr++, mask[2]);
2989	tp_wr_indirect(adapter, addr++, key[3]);
2990	tp_wr_indirect(adapter, addr, mask[3]);
2991	t3_read_reg(adapter, A_TP_PIO_DATA);
2992}
2993
2994/**
2995 *	t3_config_sched - configure a HW traffic scheduler
2996 *	@adap: the adapter
2997 *	@kbps: target rate in Kbps
2998 *	@sched: the scheduler index
2999 *
3000 *	Configure a HW scheduler for the target rate
3001 */
3002int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
3003{
3004	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3005	unsigned int clk = adap->params.vpd.cclk * 1000;
3006	unsigned int selected_cpt = 0, selected_bpt = 0;
3007
3008	if (kbps > 0) {
3009		kbps *= 125;	/* -> bytes */
3010		for (cpt = 1; cpt <= 255; cpt++) {
3011			tps = clk / cpt;
3012			bpt = (kbps + tps / 2) / tps;
3013			if (bpt > 0 && bpt <= 255) {
3014				v = bpt * tps;
3015				delta = v >= kbps ? v - kbps : kbps - v;
3016				if (delta <= mindelta) {
3017					mindelta = delta;
3018					selected_cpt = cpt;
3019					selected_bpt = bpt;
3020				}
3021			} else if (selected_cpt)
3022				break;
3023		}
3024		if (!selected_cpt)
3025			return -EINVAL;
3026	}
3027	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3028		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3029	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3030	if (sched & 1)
3031		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3032	else
3033		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3034	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
3035	return 0;
3036}
3037
3038static int tp_init(struct adapter *adap, const struct tp_params *p)
3039{
3040	int busy = 0;
3041
3042	tp_config(adap, p);
3043	t3_set_vlan_accel(adap, 3, 0);
3044
3045	if (is_offload(adap)) {
3046		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3047		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3048		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3049				       0, 1000, 5);
3050		if (busy)
3051			CH_ERR(adap, "TP initialization timed out\n");
3052	}
3053
3054	if (!busy)
3055		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
3056	return busy;
3057}
3058
3059/*
3060 * Perform the bits of HW initialization that are dependent on the Tx
3061 * channels being used.
3062 */
3063static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
3064{
3065	int i;
3066
3067	if (chan_map != 3) {                                 /* one channel */
3068		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3069		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3070		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3071			     (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3072					      F_TPTXPORT1EN | F_PORT1ACTIVE));
3073		t3_write_reg(adap, A_PM1_TX_CFG,
3074			     chan_map == 1 ? 0xffffffff : 0);
3075	} else {                                             /* two channels */
3076		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3077		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3078		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3079			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3080		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3081			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3082			     F_ENFORCEPKT);
3083		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3084		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3085		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3086			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3087		for (i = 0; i < 16; i++)
3088			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3089				     (i << 16) | 0x1010);
3090	}
3091}
3092
3093static int calibrate_xgm(struct adapter *adapter)
3094{
3095	if (uses_xaui(adapter)) {
3096		unsigned int v, i;
3097
3098		for (i = 0; i < 5; ++i) {
3099			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3100			t3_read_reg(adapter, A_XGM_XAUI_IMP);
3101			msleep(1);
3102			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3103			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3104				t3_write_reg(adapter, A_XGM_XAUI_IMP,
3105					     V_XAUIIMP(G_CALIMP(v) >> 2));
3106				return 0;
3107			}
3108		}
3109		CH_ERR(adapter, "MAC calibration failed\n");
3110		return -1;
3111	} else {
3112		t3_write_reg(adapter, A_XGM_RGMII_IMP,
3113			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3114		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3115				 F_XGM_IMPSETUPDATE);
3116	}
3117	return 0;
3118}
3119
3120static void calibrate_xgm_t3b(struct adapter *adapter)
3121{
3122	if (!uses_xaui(adapter)) {
3123		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3124			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3125		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3126		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3127				 F_XGM_IMPSETUPDATE);
3128		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3129				 0);
3130		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3131		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
3132	}
3133}
3134
3135struct mc7_timing_params {
3136	unsigned char ActToPreDly;
3137	unsigned char ActToRdWrDly;
3138	unsigned char PreCyc;
3139	unsigned char RefCyc[5];
3140	unsigned char BkCyc;
3141	unsigned char WrToRdDly;
3142	unsigned char RdToWrDly;
3143};
3144
3145/*
3146 * Write a value to a register and check that the write completed.  These
3147 * writes normally complete in a cycle or two, so one read should suffice.
3148 * The very first read exists to flush the posted write to the device.
3149 */
3150static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3151{
3152	t3_write_reg(adapter, addr, val);
3153	t3_read_reg(adapter, addr);	/* flush */
3154	if (!(t3_read_reg(adapter, addr) & F_BUSY))
3155		return 0;
3156	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3157	return -EIO;
3158}
3159
3160static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3161{
3162	static const unsigned int mc7_mode[] = {
3163		0x632, 0x642, 0x652, 0x432, 0x442
3164	};
3165	static const struct mc7_timing_params mc7_timings[] = {
3166		{12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
3167		{12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
3168		{12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
3169		{9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
3170		{9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
3171	};
3172
3173	u32 val;
3174	unsigned int width, density, slow, attempts;
3175	struct adapter *adapter = mc7->adapter;
3176	const struct mc7_timing_params *p = &mc7_timings[mem_type];
3177
3178	if (!mc7->size)
3179		return 0;
3180
3181	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3182	slow = val & F_SLOW;
3183	width = G_WIDTH(val);
3184	density = G_DEN(val);
3185
3186	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3187	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
3188	msleep(1);
3189
3190	if (!slow) {
3191		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3192		t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3193		msleep(1);
3194		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3195		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3196			CH_ERR(adapter, "%s MC7 calibration timed out\n",
3197			       mc7->name);
3198			goto out_fail;
3199		}
3200	}
3201
3202	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3203		     V_ACTTOPREDLY(p->ActToPreDly) |
3204		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3205		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3206		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3207
3208	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3209		     val | F_CLKEN | F_TERM150);
3210	t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
3211
3212	if (!slow)
3213		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
3214				 F_DLLENB);
3215	udelay(1);
3216
3217	val = slow ? 3 : 6;
3218	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3219	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3220	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3221	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3222		goto out_fail;
3223
3224	if (!slow) {
3225		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3226		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
3227		udelay(5);
3228	}
3229
3230	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3231	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3232	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3233	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3234		       mc7_mode[mem_type]) ||
3235	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3236	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3237		goto out_fail;
3238
3239	/* clock value is in KHz */
3240	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;	/* ns */
3241	mc7_clock /= 1000000;	/* KHz->MHz, ns->us */
3242
3243	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3244		     F_PERREFEN | V_PREREFDIV(mc7_clock));
3245	t3_read_reg(adapter, mc7->offset + A_MC7_REF);	/* flush */
3246
3247	t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
3248	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3249	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3250	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3251		     (mc7->size << width) - 1);
3252	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3253	t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);	/* flush */
3254
3255	attempts = 50;
3256	do {
3257		msleep(250);
3258		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3259	} while ((val & F_BUSY) && --attempts);
3260	if (val & F_BUSY) {
3261		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3262		goto out_fail;
3263	}
3264
3265	/* Enable normal memory accesses. */
3266	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
3267	return 0;
3268
3269out_fail:
3270	return -1;
3271}
3272
3273static void config_pcie(struct adapter *adap)
3274{
3275	static const u16 ack_lat[4][6] = {
3276		{237, 416, 559, 1071, 2095, 4143},
3277		{128, 217, 289, 545, 1057, 2081},
3278		{73, 118, 154, 282, 538, 1050},
3279		{67, 107, 86, 150, 278, 534}
3280	};
3281	static const u16 rpl_tmr[4][6] = {
3282		{711, 1248, 1677, 3213, 6285, 12429},
3283		{384, 651, 867, 1635, 3171, 6243},
3284		{219, 354, 462, 846, 1614, 3150},
3285		{201, 321, 258, 450, 834, 1602}
3286	};
3287
3288	u16 val, devid;
3289	unsigned int log2_width, pldsize;
3290	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3291
3292	pci_read_config_word(adap->pdev,
3293			     adap->pdev->pcie_cap + PCI_EXP_DEVCTL,
3294			     &val);
3295	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3296
3297	pci_read_config_word(adap->pdev, 0x2, &devid);
3298	if (devid == 0x37) {
3299		pci_write_config_word(adap->pdev,
3300				      adap->pdev->pcie_cap + PCI_EXP_DEVCTL,
3301				      val & ~PCI_EXP_DEVCTL_READRQ &
3302				      ~PCI_EXP_DEVCTL_PAYLOAD);
3303		pldsize = 0;
3304	}
3305
3306	pci_read_config_word(adap->pdev, adap->pdev->pcie_cap + PCI_EXP_LNKCTL,
3307			     &val);
3308
3309	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3310	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3311	    G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3312	log2_width = fls(adap->params.pci.width) - 1;
3313	acklat = ack_lat[log2_width][pldsize];
3314	if (val & 1)		/* check LOsEnable */
3315		acklat += fst_trn_tx * 4;
3316	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3317
3318	if (adap->params.rev == 0)
3319		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3320				 V_T3A_ACKLAT(M_T3A_ACKLAT),
3321				 V_T3A_ACKLAT(acklat));
3322	else
3323		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3324				 V_ACKLAT(acklat));
3325
3326	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3327			 V_REPLAYLMT(rpllmt));
3328
3329	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3330	t3_set_reg_field(adap, A_PCIE_CFG, 0,
3331			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
3332			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
3333}
3334
3335/*
3336 * Initialize and configure T3 HW modules.  This performs the
3337 * initialization steps that need to be done once after a card is reset.
3338 * MAC and PHY initialization is handled separarely whenever a port is enabled.
3339 *
3340 * fw_params are passed to FW and their value is platform dependent.  Only the
3341 * top 8 bits are available for use, the rest must be 0.
3342 */
3343int t3_init_hw(struct adapter *adapter, u32 fw_params)
3344{
3345	int err = -EIO, attempts, i;
3346	const struct vpd_params *vpd = &adapter->params.vpd;
3347
3348	if (adapter->params.rev > 0)
3349		calibrate_xgm_t3b(adapter);
3350	else if (calibrate_xgm(adapter))
3351		goto out_err;
3352
3353	if (vpd->mclk) {
3354		partition_mem(adapter, &adapter->params.tp);
3355
3356		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3357		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3358		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3359		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3360				adapter->params.mc5.nfilters,
3361				adapter->params.mc5.nroutes))
3362			goto out_err;
3363
3364		for (i = 0; i < 32; i++)
3365			if (clear_sge_ctxt(adapter, i, F_CQ))
3366				goto out_err;
3367	}
3368
3369	if (tp_init(adapter, &adapter->params.tp))
3370		goto out_err;
3371
3372	t3_tp_set_coalescing_size(adapter,
3373				  min(adapter->params.sge.max_pkt_size,
3374				      MAX_RX_COALESCING_LEN), 1);
3375	t3_tp_set_max_rxsize(adapter,
3376			     min(adapter->params.sge.max_pkt_size, 16384U));
3377	ulp_config(adapter, &adapter->params.tp);
3378
3379	if (is_pcie(adapter))
3380		config_pcie(adapter);
3381	else
3382		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
3383				 F_DMASTOPEN | F_CLIDECEN);
3384
3385	if (adapter->params.rev == T3_REV_C)
3386		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
3387				 F_CFG_CQE_SOP_MASK);
3388
3389	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3390	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3391	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3392	chan_init_hw(adapter, adapter->params.chan_map);
3393	t3_sge_init(adapter, &adapter->params.sge);
3394	t3_set_reg_field(adapter, A_PL_RST, 0, F_FATALPERREN);
3395
3396	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
3397
3398	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3399	t3_write_reg(adapter, A_CIM_BOOT_CFG,
3400		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3401	t3_read_reg(adapter, A_CIM_BOOT_CFG);	/* flush */
3402
3403	attempts = 100;
3404	do {			/* wait for uP to initialize */
3405		msleep(20);
3406	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3407	if (!attempts) {
3408		CH_ERR(adapter, "uP initialization timed out\n");
3409		goto out_err;
3410	}
3411
3412	err = 0;
3413out_err:
3414	return err;
3415}
3416
3417/**
3418 *	get_pci_mode - determine a card's PCI mode
3419 *	@adapter: the adapter
3420 *	@p: where to store the PCI settings
3421 *
3422 *	Determines a card's PCI mode and associated parameters, such as speed
3423 *	and width.
3424 */
3425static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
3426{
3427	static unsigned short speed_map[] = { 33, 66, 100, 133 };
3428	u32 pci_mode, pcie_cap;
3429
3430	pcie_cap = pci_pcie_cap(adapter->pdev);
3431	if (pcie_cap) {
3432		u16 val;
3433
3434		p->variant = PCI_VARIANT_PCIE;
3435		pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3436					&val);
3437		p->width = (val >> 4) & 0x3f;
3438		return;
3439	}
3440
3441	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3442	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3443	p->width = (pci_mode & F_64BIT) ? 64 : 32;
3444	pci_mode = G_PCIXINITPAT(pci_mode);
3445	if (pci_mode == 0)
3446		p->variant = PCI_VARIANT_PCI;
3447	else if (pci_mode < 4)
3448		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3449	else if (pci_mode < 8)
3450		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3451	else
3452		p->variant = PCI_VARIANT_PCIX_266_MODE2;
3453}
3454
3455/**
3456 *	init_link_config - initialize a link's SW state
3457 *	@lc: structure holding the link state
3458 *	@ai: information about the current card
3459 *
3460 *	Initializes the SW state maintained for each link, including the link's
3461 *	capabilities and default speed/duplex/flow-control/autonegotiation
3462 *	settings.
3463 */
3464static void init_link_config(struct link_config *lc, unsigned int caps)
3465{
3466	lc->supported = caps;
3467	lc->requested_speed = lc->speed = SPEED_INVALID;
3468	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3469	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3470	if (lc->supported & SUPPORTED_Autoneg) {
3471		lc->advertising = lc->supported;
3472		lc->autoneg = AUTONEG_ENABLE;
3473		lc->requested_fc |= PAUSE_AUTONEG;
3474	} else {
3475		lc->advertising = 0;
3476		lc->autoneg = AUTONEG_DISABLE;
3477	}
3478}
3479
3480/**
3481 *	mc7_calc_size - calculate MC7 memory size
3482 *	@cfg: the MC7 configuration
3483 *
3484 *	Calculates the size of an MC7 memory in bytes from the value of its
3485 *	configuration register.
3486 */
3487static unsigned int mc7_calc_size(u32 cfg)
3488{
3489	unsigned int width = G_WIDTH(cfg);
3490	unsigned int banks = !!(cfg & F_BKS) + 1;
3491	unsigned int org = !!(cfg & F_ORG) + 1;
3492	unsigned int density = G_DEN(cfg);
3493	unsigned int MBs = ((256 << density) * banks) / (org << width);
3494
3495	return MBs << 20;
3496}
3497
3498static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3499		     unsigned int base_addr, const char *name)
3500{
3501	u32 cfg;
3502
3503	mc7->adapter = adapter;
3504	mc7->name = name;
3505	mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3506	cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3507	mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3508	mc7->width = G_WIDTH(cfg);
3509}
3510
3511static void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3512{
3513	u16 devid;
3514
3515	mac->adapter = adapter;
3516	pci_read_config_word(adapter->pdev, 0x2, &devid);
3517
3518	if (devid == 0x37 && !adapter->params.vpd.xauicfg[1])
3519		index = 0;
3520	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3521	mac->nucast = 1;
3522
3523	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3524		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3525			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3526		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3527				 F_ENRGMII, 0);
3528	}
3529}
3530
3531static void early_hw_init(struct adapter *adapter,
3532			  const struct adapter_info *ai)
3533{
3534	u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3535
3536	mi1_init(adapter, ai);
3537	t3_write_reg(adapter, A_I2C_CFG,	/* set for 80KHz */
3538		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3539	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3540		     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3541	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
3542	t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
3543
3544	if (adapter->params.rev == 0 || !uses_xaui(adapter))
3545		val |= F_ENRGMII;
3546
3547	/* Enable MAC clocks so we can access the registers */
3548	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3549	t3_read_reg(adapter, A_XGM_PORT_CFG);
3550
3551	val |= F_CLKDIVRESET_;
3552	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3553	t3_read_reg(adapter, A_XGM_PORT_CFG);
3554	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3555	t3_read_reg(adapter, A_XGM_PORT_CFG);
3556}
3557
3558/*
3559 * Reset the adapter.
3560 * Older PCIe cards lose their config space during reset, PCI-X
3561 * ones don't.
3562 */
3563int t3_reset_adapter(struct adapter *adapter)
3564{
3565	int i, save_and_restore_pcie =
3566	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3567	uint16_t devid = 0;
3568
3569	if (save_and_restore_pcie)
3570		pci_save_state(adapter->pdev);
3571	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3572
3573	/*
3574	 * Delay. Give Some time to device to reset fully.
3575	 * XXX The delay time should be modified.
3576	 */
3577	for (i = 0; i < 10; i++) {
3578		msleep(50);
3579		pci_read_config_word(adapter->pdev, 0x00, &devid);
3580		if (devid == 0x1425)
3581			break;
3582	}
3583
3584	if (devid != 0x1425)
3585		return -1;
3586
3587	if (save_and_restore_pcie)
3588		pci_restore_state(adapter->pdev);
3589	return 0;
3590}
3591
3592static int init_parity(struct adapter *adap)
3593{
3594		int i, err, addr;
3595
3596	if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3597		return -EBUSY;
3598
3599	for (err = i = 0; !err && i < 16; i++)
3600		err = clear_sge_ctxt(adap, i, F_EGRESS);
3601	for (i = 0xfff0; !err && i <= 0xffff; i++)
3602		err = clear_sge_ctxt(adap, i, F_EGRESS);
3603	for (i = 0; !err && i < SGE_QSETS; i++)
3604		err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
3605	if (err)
3606		return err;
3607
3608	t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3609	for (i = 0; i < 4; i++)
3610		for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3611			t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3612				     F_IBQDBGWR | V_IBQDBGQID(i) |
3613				     V_IBQDBGADDR(addr));
3614			err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3615					      F_IBQDBGBUSY, 0, 2, 1);
3616			if (err)
3617				return err;
3618		}
3619	return 0;
3620}
3621
3622/*
3623 * Initialize adapter SW state for the various HW modules, set initial values
3624 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3625 * interface.
3626 */
3627int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
3628		    int reset)
3629{
3630	int ret;
3631	unsigned int i, j = -1;
3632
3633	get_pci_mode(adapter, &adapter->params.pci);
3634
3635	adapter->params.info = ai;
3636	adapter->params.nports = ai->nports0 + ai->nports1;
3637	adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
3638	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3639	/*
3640	 * We used to only run the "adapter check task" once a second if
3641	 * we had PHYs which didn't support interrupts (we would check
3642	 * their link status once a second).  Now we check other conditions
3643	 * in that routine which could potentially impose a very high
3644	 * interrupt load on the system.  As such, we now always scan the
3645	 * adapter state once a second ...
3646	 */
3647	adapter->params.linkpoll_period = 10;
3648	adapter->params.stats_update_period = is_10G(adapter) ?
3649	    MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3650	adapter->params.pci.vpd_cap_addr =
3651	    pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3652	ret = get_vpd_params(adapter, &adapter->params.vpd);
3653	if (ret < 0)
3654		return ret;
3655
3656	if (reset && t3_reset_adapter(adapter))
3657		return -1;
3658
3659	t3_sge_prep(adapter, &adapter->params.sge);
3660
3661	if (adapter->params.vpd.mclk) {
3662		struct tp_params *p = &adapter->params.tp;
3663
3664		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3665		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3666		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3667
3668		p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
3669		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3670		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3671		p->cm_size = t3_mc7_size(&adapter->cm);
3672		p->chan_rx_size = p->pmrx_size / 2;	/* only 1 Rx channel */
3673		p->chan_tx_size = p->pmtx_size / p->nchan;
3674		p->rx_pg_size = 64 * 1024;
3675		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3676		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3677		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3678		p->ntimer_qs = p->cm_size >= (128 << 20) ||
3679		    adapter->params.rev > 0 ? 12 : 6;
3680	}
3681
3682	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3683				  t3_mc7_size(&adapter->pmtx) &&
3684				  t3_mc7_size(&adapter->cm);
3685
3686	if (is_offload(adapter)) {
3687		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3688		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3689		    DEFAULT_NFILTERS : 0;
3690		adapter->params.mc5.nroutes = 0;
3691		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3692
3693		init_mtus(adapter->params.mtus);
3694		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3695	}
3696
3697	early_hw_init(adapter, ai);
3698	ret = init_parity(adapter);
3699	if (ret)
3700		return ret;
3701
3702	for_each_port(adapter, i) {
3703		u8 hw_addr[6];
3704		const struct port_type_info *pti;
3705		struct port_info *p = adap2pinfo(adapter, i);
3706
3707		while (!adapter->params.vpd.port_type[++j])
3708			;
3709
3710		pti = &port_types[adapter->params.vpd.port_type[j]];
3711		if (!pti->phy_prep) {
3712			CH_ALERT(adapter, "Invalid port type index %d\n",
3713				 adapter->params.vpd.port_type[j]);
3714			return -EINVAL;
3715		}
3716
3717		p->phy.mdio.dev = adapter->port[i];
3718		ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3719				    ai->mdio_ops);
3720		if (ret)
3721			return ret;
3722		mac_prep(&p->mac, adapter, j);
3723
3724		/*
3725		 * The VPD EEPROM stores the base Ethernet address for the
3726		 * card.  A port's address is derived from the base by adding
3727		 * the port's index to the base's low octet.
3728		 */
3729		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3730		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3731
3732		memcpy(adapter->port[i]->dev_addr, hw_addr,
3733		       ETH_ALEN);
3734		memcpy(adapter->port[i]->perm_addr, hw_addr,
3735		       ETH_ALEN);
3736		init_link_config(&p->link_config, p->phy.caps);
3737		p->phy.ops->power_down(&p->phy, 1);
3738
3739		/*
3740		 * If the PHY doesn't support interrupts for link status
3741		 * changes, schedule a scan of the adapter links at least
3742		 * once a second.
3743		 */
3744		if (!(p->phy.caps & SUPPORTED_IRQ) &&
3745		    adapter->params.linkpoll_period > 10)
3746			adapter->params.linkpoll_period = 10;
3747	}
3748
3749	return 0;
3750}
3751
3752void t3_led_ready(struct adapter *adapter)
3753{
3754	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3755			 F_GPIO0_OUT_VAL);
3756}
3757
3758int t3_replay_prep_adapter(struct adapter *adapter)
3759{
3760	const struct adapter_info *ai = adapter->params.info;
3761	unsigned int i, j = -1;
3762	int ret;
3763
3764	early_hw_init(adapter, ai);
3765	ret = init_parity(adapter);
3766	if (ret)
3767		return ret;
3768
3769	for_each_port(adapter, i) {
3770		const struct port_type_info *pti;
3771		struct port_info *p = adap2pinfo(adapter, i);
3772
3773		while (!adapter->params.vpd.port_type[++j])
3774			;
3775
3776		pti = &port_types[adapter->params.vpd.port_type[j]];
3777		ret = pti->phy_prep(&p->phy, adapter, p->phy.mdio.prtad, NULL);
3778		if (ret)
3779			return ret;
3780		p->phy.ops->power_down(&p->phy, 1);
3781	}
3782
3783return 0;
3784}
3785
3786