/* bnx2x_main.c — revision a8f47eb701a562f6b5c81e2e0c143148915d7913 */
/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

/* Prefix every pr_*() message with the module name */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/semaphore.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_vfpf.h"
#include "bnx2x_dcb.h"
#include "bnx2x_sp.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files
 */
/* Firmware file version string, built from the FW version components
 * exported by bnx2x_fw_file_hdr.h (e.g. "7.8.2.0").
 */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
/* Per-chip-family firmware blobs requested via request_firmware() */
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] =
	"Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
		   "BCM57710/57711/57711E/"
		   "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
		   "57840/57840_MF Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

/* Module parameters (all read-only after load; perm bits 0 hide them
 * from sysfs writes).
 */
int bnx2x_num_queues;
module_param_named(num_queues, bnx2x_num_queues, int, 0);
MODULE_PARM_DESC(num_queues,
		 " Set number of queues (default is as a number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
		 "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

/* Driver-global workqueue for slowpath work items */
struct workqueue_struct *bnx2x_wq;

/* Saved MAC-block register addresses and values, used when tearing down
 * a previously-loaded driver instance (register address + value pairs
 * for the XMAC/EMAC/UMAC/BMAC blocks).
 */
struct bnx2x_mac_vals {
	u32 xmac_addr;
	u32 xmac_val;
	u32 emac_addr;
	u32 emac_val;
	u32 umac_addr;
	u32 umac_val;
	u32 bmac_addr;
	u32 bmac_val[2];
};

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
	BCM57712,
	BCM57712_MF,
	BCM57712_VF,
	BCM57800,
	BCM57800_MF,
	BCM57800_VF,
	BCM57810,
	BCM57810_MF,
	BCM57810_VF,
	BCM57840_4_10,
	BCM57840_2_20,
	BCM57840_MF,
	BCM57840_VF,
	BCM57811,
	BCM57811_MF,
	BCM57840_O,
	BCM57840_MFO,
	BCM57811_VF
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] = {
	[BCM57710] = { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
	[BCM57711] = { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
	[BCM57711E] = { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
	[BCM57712] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
	[BCM57712_MF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
	[BCM57712_VF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" },
	[BCM57800] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
	[BCM57800_MF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
	[BCM57800_VF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" },
	[BCM57810] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
	[BCM57810_MF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
	[BCM57810_VF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" },
	[BCM57840_4_10] = { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
	[BCM57840_2_20] = { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
	[BCM57840_MF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
	[BCM57840_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" },
	[BCM57811] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" },
	[BCM57811_MF] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" },
	[BCM57840_O] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
	[BCM57840_MFO] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
	[BCM57811_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" }
};

/* Provide PCI device IDs locally when the pci_ids.h of the build kernel
 * does not define them yet; the CHIP_NUM_* values are the hardware IDs.
 */
#ifndef PCI_DEVICE_ID_NX2_57710
#define PCI_DEVICE_ID_NX2_57710		CHIP_NUM_57710
#endif
#ifndef PCI_DEVICE_ID_NX2_57711
#define PCI_DEVICE_ID_NX2_57711		CHIP_NUM_57711
#endif
#ifndef PCI_DEVICE_ID_NX2_57711E
#define PCI_DEVICE_ID_NX2_57711E	CHIP_NUM_57711E
#endif
#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712		CHIP_NUM_57712
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_MF
#define PCI_DEVICE_ID_NX2_57712_MF	CHIP_NUM_57712_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_VF
#define PCI_DEVICE_ID_NX2_57712_VF	CHIP_NUM_57712_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800
#define PCI_DEVICE_ID_NX2_57800		CHIP_NUM_57800
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_MF
#define PCI_DEVICE_ID_NX2_57800_MF	CHIP_NUM_57800_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_VF
#define PCI_DEVICE_ID_NX2_57800_VF	CHIP_NUM_57800_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57810
#define PCI_DEVICE_ID_NX2_57810		CHIP_NUM_57810
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_MF
#define PCI_DEVICE_ID_NX2_57810_MF	CHIP_NUM_57810_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_O
#define PCI_DEVICE_ID_NX2_57840_O	CHIP_NUM_57840_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_VF
#define PCI_DEVICE_ID_NX2_57810_VF	CHIP_NUM_57810_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_4_10
#define PCI_DEVICE_ID_NX2_57840_4_10	CHIP_NUM_57840_4_10
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_2_20
#define PCI_DEVICE_ID_NX2_57840_2_20	CHIP_NUM_57840_2_20
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MFO
#define PCI_DEVICE_ID_NX2_57840_MFO	CHIP_NUM_57840_MF_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MF
#define PCI_DEVICE_ID_NX2_57840_MF	CHIP_NUM_57840_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_VF
#define PCI_DEVICE_ID_NX2_57840_VF	CHIP_NUM_57840_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811
#define PCI_DEVICE_ID_NX2_57811		CHIP_NUM_57811
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_MF
#define PCI_DEVICE_ID_NX2_57811_MF	CHIP_NUM_57811_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_VF
#define PCI_DEVICE_ID_NX2_57811_VF	CHIP_NUM_57811_VF
#endif

/* PCI match table; driver_data is the bnx2x_board_type index */
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811),
	  BCM57811 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/* Global resources for unloading a previously loaded device */
#define BNX2X_PREV_WAIT_NEEDED 1
static DEFINE_SEMAPHORE(bnx2x_prev_sem);
static LIST_HEAD(bnx2x_prev_list);

/* Forward declaration */
static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);

/****************************************************************************
* General service functions
****************************************************************************/

/* Write a 64-bit DMA address to two consecutive 32-bit GRC registers
 * (low dword at @addr, high dword at @addr + 4).
 */
static void __storm_memset_dma_mapping(struct bnx2x *bp,
				       u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

/* Publish the slowpath queue (SPQ) page base address to the XSTORM
 * fast memory for the absolute function id @abs_fid.
 */
static void storm_memset_spq_addr(struct bnx2x *bp,
				  dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

/* Record in all four STORM internal memories which PF owns the
 * (possibly VF) function @abs_fid.
 */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

/* Set the enable flag for function @abs_fid in all four STORM
 * internal memories.
 */
static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM +
		TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

/* Copy the event-ring parameters into CSTORM internal memory for
 * PF @pfid (struct copied dword-by-dword by __storm_memset_struct).
 */
static void storm_memset_eq_data(struct bnx2x *bp,
				 struct event_ring_data *eq_data,
				 u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

/* Update the event-ring producer index in CSTORM memory for PF @pfid. */
static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
				 u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}

/* used only at init
 * locking is done by mcp
 */
/* Indirect GRC register write through the PCI config-space window:
 * program the window address, write the data, then restore the window
 * to the vendor-id offset so later config reads behave normally.
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

/* Indirect GRC register read; mirror image of bnx2x_reg_wr_ind(). */
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

/* Format strings for DMAE debug prints below */
#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"

/* Debug-print a DMAE command: decode the src/dst combination encoded
 * in the opcode, then dump the raw command dwords.
 */
static void bnx2x_dp_dmae(struct bnx2x *bp,
			  struct dmae_command *dmae, int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
	int i;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode,
			   dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			/* GRC source addresses are dword-aligned: >> 2 */
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		/* no destination (e.g. DMAE used purely for completion) */
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
			   "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
			   "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}

	/* raw dump of the whole command, one dword per line */
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
		DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n",
		   i, *(((u32 *)dmae) + i));
}

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct
		     dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	/* command slot @idx in the DMAE command memory */
	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	/* kick the channel */
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

/* Add completion destination and enable bits to a DMAE opcode. */
u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			 DMAE_CMD_C_ENABLE);
}

/* Clear the source-reset bit from a DMAE opcode. */
u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

/* Build a DMAE opcode for the given source/destination types,
 * current port/VN, error policy and endianness; optionally with a
 * completion of type @comp_type when @with_comp is true.
 */
u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}

/* Initialize @dmae for a PCI-completion transfer: zero the command,
 * set the opcode and point the completion at the slowpath wb_comp word.
 */
void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
			       struct dmae_command *dmae,
			       u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a dmae command over the init-channel and wait for completion */
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command
			       *dmae,
			       u32 *comp)
{
	/* emulation/FPGA platforms are much slower; poll far longer */
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
	int rc = 0;

	bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);

	/* Lock the dmae channel. Disable BHs to prevent a dead-lock
	 * as long as this code is called both from syscall context and
	 * from ndo_set_rx_mode() flow that may be called from BH.
	 */
	spin_lock_bh(&bp->dmae_lock);

	/* reset completion */
	*comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {

		/* give up on timeout, or immediately while a recovery
		 * flow is in progress (the device may be unresponsive)
		 */
		if (!cnt ||
		    (bp->recovery_state != BNX2X_RECOVERY_DONE &&
		     bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

unlock:
	spin_unlock_bh(&bp->dmae_lock);
	return rc;
}

/* DMA @len32 dwords from host memory @dma_addr to GRC address @dst_addr.
 * Before the DMAE engine is ready, fall back to indirect/string register
 * writes using the slowpath wb_data scratch buffer.
 */
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	int rc;
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		if (CHIP_IS_E1(bp))
			bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		else
			bnx2x_init_str_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	/* issue the command and wait for completion */
	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
	if (rc) {
		BNX2X_ERR("DMAE returned failure %d\n", rc);
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	}
}

/* DMA @len32 dwords from GRC address @src_addr into the slowpath
 * wb_data buffer. Before the DMAE engine is ready, fall back to
 * one-by-one register reads (indirect on E1).
 */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	int rc;
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		if (CHIP_IS_E1(bp))
			for (i = 0; i < len32; i++)
				data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		else
			for (i = 0; i < len32; i++)
				data[i] = REG_RD(bp, src_addr + i*4);

		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	/* issue the command and wait for completion */
	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
	if (rc) {
		BNX2X_ERR("DMAE returned failure %d\n", rc);
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	}
}

/* Write a buffer larger than the per-command DMAE limit by splitting
 * it into maximum-sized chunks (offset advances in bytes, len in dwords).
 */
static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
				      u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* Scan the assert lists of all four STORM processors and print every
 * populated entry; returns the number of asserts found. Each entry is
 * four dwords; scanning stops at the first invalid-opcode entry.
 */
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1
		     = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

/* MFW trace buffer sits just below shmem in the MCP scratch area */
#define MCPR_TRACE_BUFFER_SIZE	(0x800)
#define SCRATCH_BUFFER_SIZE(bp)	\
	(CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? \
	 0x20000 : 0x28000))

/* Dump the management firmware (MCP) cyclic trace buffer at printk
 * level @lvl. Validates the trace buffer location and signature, then
 * prints the buffer in two passes: after the current mark, then from
 * the start up to the mark (so output is in chronological order).
 */
void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
{
	u32 addr, val;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;
	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}
	netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
		(bp->common.bc_ver & 0xff0000) >> 16,
		(bp->common.bc_ver & 0xff00) >> 8,
		(bp->common.bc_ver & 0xff));

	/* read the PC twice; a stable value means the MCP is stuck */
	val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
	if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
		BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);

	/* sanity */
	if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
	    trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
				SCRATCH_BUFFER_SIZE(bp)) {
		BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
			  trace_shmem_base);
		return;
	}

	addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;

	/* validate TRCB signature */
	mark = REG_RD(bp, addr);
	if (mark != MFW_TRACE_SIGNATURE) {
		BNX2X_ERR("Trace buffer signature is missing.");
		return ;
	}

	/* read cyclic buffer pointer */
	addr += 4;
	mark = REG_RD(bp, addr);
	mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
	if (mark >= trace_shmem_base || mark < addr + 4) {
		BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
		return;
	}
	printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);

	printk("%s", lvl);

	/* dump buffer after the mark */
	for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}

	/* dump buffer before the mark */
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	printk("%s" "end of fw dump\n", lvl);
}

/* Convenience wrapper: dump the MFW trace at KERN_ERR level. */
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	bnx2x_fw_dump_lvl(bp, KERN_ERR);
}

/* Disable interrupts when the host-coalescing (HC) block is in use. */
static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/* in E1 we must use only PCI configuration space to disable
	 * MSI/MSIX capability
	 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN still always on
		 * Use mask register to prevent from HC sending interrupts
		 * after we exit the function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_IFDOWN,
	   "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	/* write and read back to verify the disable actually landed */
	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! Proper val not read from IGU!\n");
}

/* Disable interrupts when the IGU block is in use (E2 and newer). */
static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	/* write and read back to verify the disable actually landed */
	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! Proper val not read from IGU!\n");
}

/* Dispatch to the HC or IGU disable routine per chip interrupt block. */
static void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}

/* Dump driver and firmware state for post-mortem debugging; optionally
 * disable interrupts first. Prints indices, status blocks, and per-queue
 * Rx/Tx state.
 */
void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
	u8 cos;
#endif
	if (disable_int)
		bnx2x_int_disable(bp);

	bp->stats_state = STATS_STATE_DISABLED;
	bp->eth_stats.unrecoverable_error++;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
		  bp->def_idx, bp->def_att_idx, bp->attn_state,
		  bp->spq_prod_idx, bp->stats_counter);
	BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR(" def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
			bp->def_status_blk->sp_sb.index_values[i],
			(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");

	/* read the slowpath status block data out of CSTORM memory */
	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
		sp_sb_data.igu_sb_id,
		sp_sb_data.igu_seg_id,
		sp_sb_data.p_func.pf_id,
		sp_sb_data.p_func.vnic_id,
		sp_sb_data.p_func.vf_id,
		sp_sb_data.p_func.vf_valid,
		sp_sb_data.state);

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm  *hc_sm_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.common.state_machine :
			sb_data_e2.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.index_data :
			sb_data_e2.index_data;
		u8 data_size, cos;
		u32 *sb_data_p;
		struct bnx2x_fp_txdata txdata;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		for_each_cos_in_tx_queue(fp, cos)
		{
			txdata = *fp->txdata_ptr[cos];
			BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
				  i, txdata.tx_pkt_prod,
				  txdata.tx_pkt_cons, txdata.tx_bd_prod,
				  txdata.tx_bd_cons,
				  le16_to_cpu(*txdata.tx_cons_sb));
		}

		loop = CHIP_IS_E1x(bp) ?
") " : " "); 945 946 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++) 947 *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM + 948 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) + 949 i*sizeof(u32)); 950 951 pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n", 952 sp_sb_data.igu_sb_id, 953 sp_sb_data.igu_seg_id, 954 sp_sb_data.p_func.pf_id, 955 sp_sb_data.p_func.vnic_id, 956 sp_sb_data.p_func.vf_id, 957 sp_sb_data.p_func.vf_valid, 958 sp_sb_data.state); 959 960 for_each_eth_queue(bp, i) { 961 struct bnx2x_fastpath *fp = &bp->fp[i]; 962 int loop; 963 struct hc_status_block_data_e2 sb_data_e2; 964 struct hc_status_block_data_e1x sb_data_e1x; 965 struct hc_status_block_sm *hc_sm_p = 966 CHIP_IS_E1x(bp) ? 967 sb_data_e1x.common.state_machine : 968 sb_data_e2.common.state_machine; 969 struct hc_index_data *hc_index_p = 970 CHIP_IS_E1x(bp) ? 971 sb_data_e1x.index_data : 972 sb_data_e2.index_data; 973 u8 data_size, cos; 974 u32 *sb_data_p; 975 struct bnx2x_fp_txdata txdata; 976 977 /* Rx */ 978 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n", 979 i, fp->rx_bd_prod, fp->rx_bd_cons, 980 fp->rx_comp_prod, 981 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb)); 982 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n", 983 fp->rx_sge_prod, fp->last_max_sge, 984 le16_to_cpu(fp->fp_hc_idx)); 985 986 /* Tx */ 987 for_each_cos_in_tx_queue(fp, cos) 988 { 989 txdata = *fp->txdata_ptr[cos]; 990 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n", 991 i, txdata.tx_pkt_prod, 992 txdata.tx_pkt_cons, txdata.tx_bd_prod, 993 txdata.tx_bd_cons, 994 le16_to_cpu(*txdata.tx_cons_sb)); 995 } 996 997 loop = CHIP_IS_E1x(bp) ? 
998 HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2; 999 1000 /* host sb data */ 1001 1002 if (IS_FCOE_FP(fp)) 1003 continue; 1004 1005 BNX2X_ERR(" run indexes ("); 1006 for (j = 0; j < HC_SB_MAX_SM; j++) 1007 pr_cont("0x%x%s", 1008 fp->sb_running_index[j], 1009 (j == HC_SB_MAX_SM - 1) ? ")" : " "); 1010 1011 BNX2X_ERR(" indexes ("); 1012 for (j = 0; j < loop; j++) 1013 pr_cont("0x%x%s", 1014 fp->sb_index_values[j], 1015 (j == loop - 1) ? ")" : " "); 1016 /* fw sb data */ 1017 data_size = CHIP_IS_E1x(bp) ? 1018 sizeof(struct hc_status_block_data_e1x) : 1019 sizeof(struct hc_status_block_data_e2); 1020 data_size /= sizeof(u32); 1021 sb_data_p = CHIP_IS_E1x(bp) ? 1022 (u32 *)&sb_data_e1x : 1023 (u32 *)&sb_data_e2; 1024 /* copy sb data in here */ 1025 for (j = 0; j < data_size; j++) 1026 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM + 1027 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) + 1028 j * sizeof(u32)); 1029 1030 if (!CHIP_IS_E1x(bp)) { 1031 pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n", 1032 sb_data_e2.common.p_func.pf_id, 1033 sb_data_e2.common.p_func.vf_id, 1034 sb_data_e2.common.p_func.vf_valid, 1035 sb_data_e2.common.p_func.vnic_id, 1036 sb_data_e2.common.same_igu_sb_1b, 1037 sb_data_e2.common.state); 1038 } else { 1039 pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n", 1040 sb_data_e1x.common.p_func.pf_id, 1041 sb_data_e1x.common.p_func.vf_id, 1042 sb_data_e1x.common.p_func.vf_valid, 1043 sb_data_e1x.common.p_func.vnic_id, 1044 sb_data_e1x.common.same_igu_sb_1b, 1045 sb_data_e1x.common.state); 1046 } 1047 1048 /* SB_SMs data */ 1049 for (j = 0; j < HC_SB_MAX_SM; j++) { 1050 pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n", 1051 j, hc_sm_p[j].__flags, 1052 hc_sm_p[j].igu_sb_id, 1053 hc_sm_p[j].igu_seg_id, 1054 hc_sm_p[j].time_to_expire, 1055 hc_sm_p[j].timer_value); 1056 } 1057 1058 /* Indices data */ 
1059 for (j = 0; j < loop; j++) { 1060 pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j, 1061 hc_index_p[j].flags, 1062 hc_index_p[j].timeout); 1063 } 1064 } 1065 1066#ifdef BNX2X_STOP_ON_ERROR 1067 1068 /* event queue */ 1069 BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod); 1070 for (i = 0; i < NUM_EQ_DESC; i++) { 1071 u32 *data = (u32 *)&bp->eq_ring[i].message.data; 1072 1073 BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n", 1074 i, bp->eq_ring[i].message.opcode, 1075 bp->eq_ring[i].message.error); 1076 BNX2X_ERR("data: %x %x %x\n", data[0], data[1], data[2]); 1077 } 1078 1079 /* Rings */ 1080 /* Rx */ 1081 for_each_valid_rx_queue(bp, i) { 1082 struct bnx2x_fastpath *fp = &bp->fp[i]; 1083 1084 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10); 1085 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503); 1086 for (j = start; j != end; j = RX_BD(j + 1)) { 1087 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j]; 1088 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j]; 1089 1090 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n", 1091 i, j, rx_bd[1], rx_bd[0], sw_bd->data); 1092 } 1093 1094 start = RX_SGE(fp->rx_sge_prod); 1095 end = RX_SGE(fp->last_max_sge); 1096 for (j = start; j != end; j = RX_SGE(j + 1)) { 1097 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j]; 1098 struct sw_rx_page *sw_page = &fp->rx_page_ring[j]; 1099 1100 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n", 1101 i, j, rx_sge[1], rx_sge[0], sw_page->page); 1102 } 1103 1104 start = RCQ_BD(fp->rx_comp_cons - 10); 1105 end = RCQ_BD(fp->rx_comp_cons + 503); 1106 for (j = start; j != end; j = RCQ_BD(j + 1)) { 1107 u32 *cqe = (u32 *)&fp->rx_comp_ring[j]; 1108 1109 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n", 1110 i, j, cqe[0], cqe[1], cqe[2], cqe[3]); 1111 } 1112 } 1113 1114 /* Tx */ 1115 for_each_valid_tx_queue(bp, i) { 1116 struct bnx2x_fastpath *fp = &bp->fp[i]; 1117 for_each_cos_in_tx_queue(fp, cos) { 1118 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; 1119 1120 start = 
TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10); 1121 end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245); 1122 for (j = start; j != end; j = TX_BD(j + 1)) { 1123 struct sw_tx_bd *sw_bd = 1124 &txdata->tx_buf_ring[j]; 1125 1126 BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n", 1127 i, cos, j, sw_bd->skb, 1128 sw_bd->first_bd); 1129 } 1130 1131 start = TX_BD(txdata->tx_bd_cons - 10); 1132 end = TX_BD(txdata->tx_bd_cons + 254); 1133 for (j = start; j != end; j = TX_BD(j + 1)) { 1134 u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j]; 1135 1136 BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n", 1137 i, cos, j, tx_bd[0], tx_bd[1], 1138 tx_bd[2], tx_bd[3]); 1139 } 1140 } 1141 } 1142#endif 1143 bnx2x_fw_dump(bp); 1144 bnx2x_mc_assert(bp); 1145 BNX2X_ERR("end crash dump -----------------\n"); 1146} 1147 1148/* 1149 * FLR Support for E2 1150 * 1151 * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW 1152 * initialization. 1153 */ 1154#define FLR_WAIT_USEC 10000 /* 10 milliseconds */ 1155#define FLR_WAIT_INTERVAL 50 /* usec */ 1156#define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */ 1157 1158struct pbf_pN_buf_regs { 1159 int pN; 1160 u32 init_crd; 1161 u32 crd; 1162 u32 crd_freed; 1163}; 1164 1165struct pbf_pN_cmd_regs { 1166 int pN; 1167 u32 lines_occup; 1168 u32 lines_freed; 1169}; 1170 1171static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp, 1172 struct pbf_pN_buf_regs *regs, 1173 u32 poll_count) 1174{ 1175 u32 init_crd, crd, crd_start, crd_freed, crd_freed_start; 1176 u32 cur_cnt = poll_count; 1177 1178 crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed); 1179 crd = crd_start = REG_RD(bp, regs->crd); 1180 init_crd = REG_RD(bp, regs->init_crd); 1181 1182 DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd); 1183 DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd); 1184 DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed); 1185 1186 while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) < 1187 
	       (init_crd - crd_start))) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			crd = REG_RD(bp, regs->crd);
			crd_freed = REG_RD(bp, regs->crd_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n",
			   regs->pN, crd);
			DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
			   regs->pN, crd_freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

/* Poll (up to @poll_count intervals) until the PBF command queue described
 * by @regs is empty, i.e. until the freed-lines counter covers the lines
 * that were occupied at entry; logs a timeout instead of failing hard.
 */
static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
				     struct pbf_pN_cmd_regs *regs,
				     u32 poll_count)
{
	u32 occup, to_free, freed, freed_start;
	u32 cur_cnt = poll_count;

	occup = to_free = REG_RD(bp, regs->lines_occup);
	freed = freed_start = REG_RD(bp, regs->lines_freed);

	DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
	DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);

	while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			occup = REG_RD(bp, regs->lines_occup);
			freed = REG_RD(bp, regs->lines_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n",
			   regs->pN, occup);
			DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
			   regs->pN, freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

/* Poll register @reg until it reads @expected or @poll_count intervals
 * elapse; returns the last value read (caller compares against @expected).
 */
static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
				    u32 expected, u32 poll_count)
{
	u32 cur_cnt = poll_count;
	u32 val;

	while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
		udelay(FLR_WAIT_INTERVAL);

	return val;
}

/* Wait for the HW usage counter at @reg to drop to zero.
 * Returns 0 on success, 1 on timeout (logging @msg with the stuck value).
 */
int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
				    char *msg, u32 poll_cnt)
{
	u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
	if (val != 0) {
		BNX2X_ERR("%s usage count=%d\n", msg, val);
		return 1;
	}
	return 0;
}

/* Common routines with VF FLR cleanup */

/* Number of FLR poll iterations, scaled up for emulation/FPGA platforms
 * where the hardware runs much slower.
 */
u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
{
	/* adjust polling timeout */
	if (CHIP_REV_IS_EMUL(bp))
		return FLR_POLL_CNT * 2000;

	if (CHIP_REV_IS_FPGA(bp))
		return FLR_POLL_CNT * 120;

	return FLR_POLL_CNT;
}

/* Verify that the PBF command queues and transmission buffers for ports
 * P0, P1 and P4 have been flushed; register addresses differ on E3B0.
 */
void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
{
	struct pbf_pN_cmd_regs cmd_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_Q0 :
			PBF_REG_P0_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_Q0 :
			PBF_REG_P0_TQ_LINES_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_Q1 :
			PBF_REG_P1_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_Q1 :
			PBF_REG_P1_TQ_LINES_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_LB_Q :
			PBF_REG_P4_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
			PBF_REG_P4_TQ_LINES_FREED_CNT}
	};

	struct pbf_pN_buf_regs buf_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_Q0 :
			PBF_REG_P0_INIT_CRD ,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_Q0 :
			PBF_REG_P0_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
			PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_Q1 :
			PBF_REG_P1_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_Q1 :
			PBF_REG_P1_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
			PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_LB_Q :
			PBF_REG_P4_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_LB_Q :
			PBF_REG_P4_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
			PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
	};

	int i;

	/* Verify the command queues are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
		bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);

	/* Verify the transmission buffers are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
		bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
}

#define OP_GEN_PARAM(param) \
	(((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)

#define OP_GEN_TYPE(type) \
	(((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)

#define OP_GEN_AGG_VECT(index) \
	(((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)

/* Issue the FW "final cleanup" command for @clnup_func through the XSDM
 * operation generator and poll for its completion marker in CSTORM memory.
 * Returns 0 on success, 1 on failure (stale completion or timeout; a
 * timeout also triggers bnx2x_panic()).
 */
int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
{
	u32 op_gen_command = 0;
	u32 comp_addr = BAR_CSTRORM_INTMEM +
			CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
	int ret = 0;

	/* completion word must start out clear or a previous cleanup is
	 * still outstanding
	 */
	if (REG_RD(bp, comp_addr)) {
		BNX2X_ERR("Cleanup complete was not 0 before sending\n");
		return 1;
	}

	op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
	op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
	op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
	op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;

	DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
	REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);

	if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
		BNX2X_ERR("FW final cleanup did not succeed\n");
		DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
		   (REG_RD(bp, comp_addr)));
		bnx2x_panic();
		return 1;
	}
	/* Zero completion for next FLR */
	REG_WR(bp, comp_addr, 0);

	return ret;
}

/* Return non-zero if the device still reports pending PCIe transactions
 * (Device Status register, Transactions Pending bit).
 */
u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
{
	u16 status;

	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
	return status & PCI_EXP_DEVSTA_TRPND;
}

/* PF FLR specific routines
*/

/* Wait for all the HW blocks' per-PF usage counters (CFC, DQ, QM, Timers,
 * DMAE) to drain to zero.  Returns 0 on success, 1 on any timeout.
 */
static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
{
	/* wait for CFC PF usage-counter to zero (includes all the VFs) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			CFC_REG_NUM_LCIDS_INSIDE_PF,
			"CFC PF usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			DORQ_REG_PF_USAGE_CNT,
			"DQ PF usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for QM PF usage-counter to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
			"QM PF usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
			"Timers VNIC usage counter timed out",
			poll_cnt))
		return 1;
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
			"Timers NUM_SCANS usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait DMAE PF usage counter to zero */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			dmae_reg_go_c[INIT_DMAE_C(bp)],
			"DMAE command register timed out",
			poll_cnt))
		return 1;

	return 0;
}

/* Debug helper: log the current value of the various per-PF enable/disable
 * status registers after an FLR cleanup.
 */
static void bnx2x_hw_enable_status(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
	DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);

	val = REG_RD(bp, PBF_REG_DISABLE_PF);
	DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
	   val);
}

/* Clean up after a PCIe Function Level Reset of this PF: re-enable target
 * reads, drain HW usage counters, run the FW final-cleanup ramrod, verify
 * the TX path is flushed and no PCIe transactions remain, then re-enable
 * master access.  Returns 0 on success, -EBUSY on a drain/cleanup timeout.
 */
static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));

	/* Re-enable PF target read access */
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);

	/* Poll HW usage counters */
	DP(BNX2X_MSG_SP, "Polling usage counters\n");
	if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
		return -EBUSY;

	/* Zero the igu 'trailing edge' and 'leading edge' */

	/* Send the FW cleanup command */
	if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
		return -EBUSY;

	/* ATC cleanup */

	/* Verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);

	/* Wait 100ms (not adjusted according to platform) */
	msleep(100);

	/* Verify no pending pci transactions */
	if (bnx2x_is_pcie_pending(bp->pdev))
		BNX2X_ERR("PCIE Transactions still pending\n");

	/* Debug */
	bnx2x_hw_enable_status(bp);

	/*
	 * Master enable - Due to WB DMAE writes performed before this
	 * register is re-initialized as part of the regular function init
	 */
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	return 0;
}

/* Program the HC block to deliver interrupts in the currently selected
 * mode (MSI-X / single MSI-X / MSI / INTx) and set the attention edges.
 */
static void
bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;

	if (msix) {
		/* MSI-X: disable INTx and (unless single-MSI-X) single ISR */
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
		if (single_msix)
			val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
			/* non-E1 INTx: first write with MSI/MSI-X enabled,
			 * then clear that bit for the final write below
			 */
			DP(NETIF_MSG_IFUP,
			   "write %x to HC %d (addr 0x%x)\n", val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_IFUP,
	   "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
	   (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_VN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

/* Program the IGU block's PF configuration to deliver interrupts in the
 * currently selected mode and set the attention edge latches.
 */
static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);

		if (single_msix)
			val |= IGU_PF_CONF_SINGLE_ISR_EN;
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	/* Clean previous status - need to configure igu prior to ack*/
	if ((!msix) || single_msix) {
		REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
		bnx2x_ack_int(bp);
	}

	/* enable the function only after the configuration was written */
	val |= IGU_PF_CONF_FUNC_EN;

	DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n",
	   val, (msix ? "MSI-X" : (msi ?
"MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	if (val & IGU_PF_CONF_INT_LINE_EN)
		pci_intx(bp->pdev, true);

	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_VN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

/* Route an interrupt-enable request to whichever interrupt controller
 * block (HC or IGU) this chip revision uses.
 */
void bnx2x_int_enable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_enable(bp);
	else
		bnx2x_igu_int_enable(bp);
}

/* Disable interrupts and wait until every in-flight ISR and the slowpath
 * work items have finished.
 * @disable_hw: also mask interrupts at the chip, not just synchronize.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		/* vector 0 is slowpath; CNIC (if present) takes the next one */
		offset = 1;
		if (CNIC_SUPPORT(bp))
			offset++;
		for_each_eth_queue(bp, i)
			synchronize_irq(bp->msix_table[offset++].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	cancel_delayed_work(&bp->period_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if succeeded to acquire the lock */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
	   "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	/* each function has its own lock-control register block */
	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
	   "Failed to get a lock on resource %d\n", resource);
	return false;
}

/**
 * bnx2x_get_leader_lock_resource - get the recovery leader resource id
 *
 * @bp:	driver handle
 *
 * Returns the recovery leader resource id according to the engine this
 * function belongs to. Currently only 2 engines are supported.
 */
static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
{
	if (BP_PATH(bp))
		return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
	else
		return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
}

/**
 * bnx2x_trylock_leader_lock - try to acquire a leader lock.
 *
 * @bp:	driver handle
 *
 * Tries to acquire a leader lock for current engine.
 */
static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
{
	return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);

/* schedule the sp task and mark that interrupt occurred (runs from ISR) */
static int bnx2x_schedule_sp_task(struct bnx2x *bp)
{
	/* Set the interrupt occurred bit for the sp-task to recognize it
	 * must ack the interrupt and transition according to the IGU
	 * state machine.
	 */
	atomic_set(&bp->interrupt_occurred, 1);

	/* The sp_task must execute only after this bit
	 * is set, otherwise we will get out of sync and miss all
	 * further interrupts. Hence, the barrier.
	 */
	smp_wmb();

	/* schedule sp_task to workqueue */
	return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
}

/* Handle a slowpath (ramrod) completion CQE arriving on fastpath @fp:
 * map the ramrod command to a queue state-machine command, complete it on
 * the queue object, replenish the slowpath queue credit, and trigger the
 * AFEX MCP ACK flow when required.
 */
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
	struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	/* If cid is within VF range, replace the slowpath object with the
	 * one corresponding to this VF
	 */
	if (cid >= BNX2X_FIRST_VF_CID &&
	    cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
		bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);

	switch (command) {
	case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
		DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
		drv_cmd = BNX2X_Q_CMD_UPDATE;
		break;

	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
		DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_SETUP;
		break;

	case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
		DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
		break;

	case (RAMROD_CMD_ID_ETH_HALT):
		DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_HALT;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE):
		DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_TERMINATE;
		break;

	case (RAMROD_CMD_ID_ETH_EMPTY):
		DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_EMPTY;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
			  command, fp->index);
		return;
	}

	if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
	    q_obj->complete_cmd(bp, q_obj, drv_cmd))
		/* q_obj->complete_cmd() failure means that this was
		 * an unexpected completion.
		 *
		 * In this case we don't want to increase the bp->spq_left
		 * because apparently we haven't sent this command the first
		 * place.
		 */
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#else
		return;
#endif
	/* SRIOV: reschedule any 'in_progress' operations */
	bnx2x_iov_sp_event(bp, cid, true);

	smp_mb__before_atomic_inc();
	atomic_inc(&bp->cq_spq_left);
	/* push the change in bp->spq_left and towards the memory */
	smp_mb__after_atomic_inc();

	DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));

	if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
	    (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
		/* if Q update ramrod is completed for last Q in AFEX vif set
		 * flow, then ACK MCP at the end
		 *
		 * mark pending ACK to MCP bit.
		 * prevent case that both bits are cleared.
		 * At the end of load/unload driver checks that
		 * sp_state is cleared, and this order prevents
		 * races
		 */
		smp_mb__before_clear_bit();
		set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
		wmb();
		clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
		smp_mb__after_clear_bit();

		/* schedule the sp task as mcp ack is required */
		bnx2x_schedule_sp_task(bp);
	}

	return;
}

/* Top-level (INTx/MSI) interrupt handler: ack the chip, dispatch NAPI for
 * each ethernet queue whose status bit is set, forward the CNIC bit to the
 * registered cnic handler, and schedule the slowpath task for bit 0.
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;
	u8 cos;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
		if (status & mask) {
			/* Handle Rx or Tx according to SB id */
			for_each_cos_in_tx_queue(fp, cos)
				prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

	if (CNIC_SUPPORT(bp)) {
		mask = 0x2;
		if (status & (mask | 0x1)) {
			struct cnic_ops *c_ops = NULL;

			rcu_read_lock();
			c_ops = rcu_dereference(bp->cnic_ops);
			if (c_ops && (bp->cnic_eth_dev.drv_state &
				      CNIC_DRV_STATE_HANDLES_IRQ))
				c_ops->cnic_handler(bp->cnic_data, NULL);
			rcu_read_unlock();

			status &= ~mask;
		}
	}

	if (unlikely(status & 0x1)) {

		/* schedule sp task to perform default status block work, ack
		 * attentions and enable interrupts.
		 */
		bnx2x_schedule_sp_task(bp);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}

/* Link */

/*
 * General service functions
 */

/* Acquire the HW resource lock @resource for this function, retrying for
 * up to ~5 seconds.  Returns 0 on success, -EINVAL for a bad resource id,
 * -EEXIST if already held, -EAGAIN on timeout.
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
			  resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* each function has its own lock-control register block */
	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		BNX2X_ERR("lock_status 0x%x resource_bit 0x%x\n",
			  lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		usleep_range(5000, 10000);
	}
	BNX2X_ERR("Timeout\n");
	return -EAGAIN;
}

/* Release this engine's recovery-leader lock */
int bnx2x_release_leader_lock(struct bnx2x *bp)
{
	return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
}

/* Release the HW resource lock @resource.  Returns 0 on success, -EINVAL
 * for a bad resource id, -EFAULT if the lock was not actually held.
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
			  resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n",
			  lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

/* Read the current value (0 or 1) of GPIO pin @gpio_num on @port,
 * accounting for port swapping.  Returns -EINVAL for a bad pin number.
 */
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ?
MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}

/* Drive GPIO pin @gpio_num on @port to @mode (output low/high or float),
 * accounting for port swapping; serialized by the GPIO HW lock.
 * Returns 0 on success, -EINVAL for a bad pin number.
 */
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK,
		   "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK,
		   "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK,
		   "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

/* Drive several GPIO pins (bitmask @pins) to the same @mode at once;
 * serialized by the GPIO HW lock.  Unlike bnx2x_set_gpio(), port swapping
 * is the caller's responsibility.  Returns 0 or -EINVAL for a bad mode.
 */
int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
{
	u32 gpio_reg = 0;
	int rc = 0;

	/* Any port swapping should be handled by caller. */

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);
	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
		/* set CLR */
		gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
		/* set SET */
		gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
		/* set FLOAT */
		gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
		rc = -EINVAL;
		break;
	}

	if (rc == 0)
		REG_WR(bp, MISC_REG_GPIO, gpio_reg);

	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return rc;
}

/* Set or clear the interrupt state of GPIO pin @gpio_num on @port,
 * accounting for port swapping; serialized by the GPIO HW lock.
 * Returns 0 on success, -EINVAL for a bad pin number.
 */
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK,
		   "Clear GPIO INT %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK,
		   "Set GPIO INT %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

/* Drive SPIO pin @spio to @mode; only SPIO4 and SPIO5 are configurable.
 * Serialized by the SPIO HW lock.  Returns 0 or -EINVAL for a bad pin.
 */
static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
{
	u32 spio_reg;

	/* Only 2 SPIOs are configurable */
	if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
		BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);

	switch (mode) {
	case MISC_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
		spio_reg |= (spio << MISC_SPIO_CLR_POS);
		break;

	case
MISC_SPIO_OUTPUT_HIGH: 2232 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio); 2233 /* clear FLOAT and set SET */ 2234 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); 2235 spio_reg |= (spio << MISC_SPIO_SET_POS); 2236 break; 2237 2238 case MISC_SPIO_INPUT_HI_Z: 2239 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio); 2240 /* set FLOAT */ 2241 spio_reg |= (spio << MISC_SPIO_FLOAT_POS); 2242 break; 2243 2244 default: 2245 break; 2246 } 2247 2248 REG_WR(bp, MISC_REG_SPIO, spio_reg); 2249 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); 2250 2251 return 0; 2252} 2253 2254void bnx2x_calc_fc_adv(struct bnx2x *bp) 2255{ 2256 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp); 2257 switch (bp->link_vars.ieee_fc & 2258 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { 2259 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: 2260 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 2261 ADVERTISED_Pause); 2262 break; 2263 2264 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: 2265 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | 2266 ADVERTISED_Pause); 2267 break; 2268 2269 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: 2270 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; 2271 break; 2272 2273 default: 2274 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 2275 ADVERTISED_Pause); 2276 break; 2277 } 2278} 2279 2280static void bnx2x_set_requested_fc(struct bnx2x *bp) 2281{ 2282 /* Initialize link parameters structure variables 2283 * It is recommended to turn off RX FC for jumbo frames 2284 * for better performance 2285 */ 2286 if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000)) 2287 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX; 2288 else 2289 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; 2290} 2291 2292static void bnx2x_init_dropless_fc(struct bnx2x *bp) 2293{ 2294 u32 pause_enabled = 0; 2295 2296 if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) { 2297 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) 2298 pause_enabled = 1; 2299 
2300 REG_WR(bp, BAR_USTRORM_INTMEM + 2301 USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)), 2302 pause_enabled); 2303 } 2304 2305 DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n", 2306 pause_enabled ? "enabled" : "disabled"); 2307} 2308 2309int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) 2310{ 2311 int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp); 2312 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx]; 2313 2314 if (!BP_NOMCP(bp)) { 2315 bnx2x_set_requested_fc(bp); 2316 bnx2x_acquire_phy_lock(bp); 2317 2318 if (load_mode == LOAD_DIAG) { 2319 struct link_params *lp = &bp->link_params; 2320 lp->loopback_mode = LOOPBACK_XGXS; 2321 /* do PHY loopback at 10G speed, if possible */ 2322 if (lp->req_line_speed[cfx_idx] < SPEED_10000) { 2323 if (lp->speed_cap_mask[cfx_idx] & 2324 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 2325 lp->req_line_speed[cfx_idx] = 2326 SPEED_10000; 2327 else 2328 lp->req_line_speed[cfx_idx] = 2329 SPEED_1000; 2330 } 2331 } 2332 2333 if (load_mode == LOAD_LOOPBACK_EXT) { 2334 struct link_params *lp = &bp->link_params; 2335 lp->loopback_mode = LOOPBACK_EXT; 2336 } 2337 2338 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); 2339 2340 bnx2x_release_phy_lock(bp); 2341 2342 bnx2x_init_dropless_fc(bp); 2343 2344 bnx2x_calc_fc_adv(bp); 2345 2346 if (bp->link_vars.link_up) { 2347 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 2348 bnx2x_link_report(bp); 2349 } 2350 queue_delayed_work(bnx2x_wq, &bp->period_task, 0); 2351 bp->link_params.req_line_speed[cfx_idx] = req_line_speed; 2352 return rc; 2353 } 2354 BNX2X_ERR("Bootcode is missing - can not initialize link\n"); 2355 return -EINVAL; 2356} 2357 2358void bnx2x_link_set(struct bnx2x *bp) 2359{ 2360 if (!BP_NOMCP(bp)) { 2361 bnx2x_acquire_phy_lock(bp); 2362 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 2363 bnx2x_release_phy_lock(bp); 2364 2365 bnx2x_init_dropless_fc(bp); 2366 2367 bnx2x_calc_fc_adv(bp); 2368 } else 2369 BNX2X_ERR("Bootcode is missing - can not set link\n"); 2370} 

/* Reset the link via the LFA (link flap avoidance) path, under the PHY
 * lock. Requires MCP bootcode to be present.
 */
static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

/* Unconditionally reset the link (reset_ext_phy = 1), under the PHY lock. */
void bnx2x_force_link_reset(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
	bnx2x_release_phy_lock(bp);
}

/* Run the link self-test (serdes or XGXS according to @is_serdes) under
 * the PHY lock. Returns the bnx2x_test_link() result, or 0 if there is
 * no MCP bootcode.
 */
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
{
	u8 rc = 0;

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
				     is_serdes);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not test link\n");

	return rc;
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
     In the latter case fairness algorithm should be deactivated.
     If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_min(struct bnx2x *bp,
			      struct cmng_init_input *input)
{
	int all_zero = 1;
	int vn;

	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
		u32 vn_cfg = bp->mf_config[vn];
		/* min BW is configured in 100Mb units */
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			vn_min_rate = 0;
		/* If min rate is zero - set it to 1 */
		else if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		input->vnic_min_rate[vn] = vn_min_rate;
	}

	/* if ETS or all min rates are zeros - disable fairness */
	if (BNX2X_IS_ETS_ENABLED(bp)) {
		input->flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
	} else if (all_zero) {
		input->flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP,
		   "All MIN values are zeroes fairness will be disabled\n");
	} else
		input->flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}

/* Compute vnic @vn's max rate from its MF configuration and store it in
 * input->vnic_max_rate[vn]. Hidden functions get rate 0.
 */
static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
				    struct cmng_init_input *input)
{
	u16 vn_max_rate;
	u32 vn_cfg = bp->mf_config[vn];

	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
		vn_max_rate = 0;
	else {
		u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);

		if (IS_MF_SI(bp)) {
			/* maxCfg in percents of linkspeed */
			vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
		} else /* SD modes */
			/* maxCfg is absolute in 100Mb units */
			vn_max_rate = maxCfg * 100;
	}

	DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);

	input->vnic_max_rate[vn] = vn_max_rate;
}

/* Select the congestion-management mode: min-max rate shaping only in
 * multi-function mode on real silicon (never on emulation/FPGA).
 */
static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
{
	if (CHIP_REV_IS_SLOW(bp))
		return CMNG_FNS_NONE;
	if (IS_MF(bp))
		return CMNG_FNS_MINMAX;

	return CMNG_FNS_NONE;
}

/* Refresh bp->mf_config[] for every vn of this port/path from the MF
 * configuration in shmem, and update the MF_FUNC_DIS flag for our vn.
 */
void bnx2x_read_mf_cfg(struct bnx2x *bp)
{
	int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);

	if (BP_NOMCP(bp))
		return; /* what should be the default value in this case */

	/* For 2 port configuration the absolute function number formula
	 * is:
	 *      abs_func = 2 * vn + BP_PORT + BP_PATH
	 *
	 *      and there are 4 functions per port
	 *
	 * For 4 port configuration it is
	 *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
	 *
	 *      and there are 2 functions per port
	 */
	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
		int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);

		if (func >= E1H_FUNC_MAX)
			break;

		bp->mf_config[vn] =
			MF_CFG_RD(bp, func_mf_config[func].config);
	}
	if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
		DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
		bp->flags |= MF_FUNC_DIS;
	} else {
		DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
		bp->flags &= ~MF_FUNC_DIS;
	}
}

/* Build the cmng input (min/max rates per vnic) and initialize bp->cmng.
 * @read_cfg: re-read the MF configuration from shmem first.
 * Does nothing when @cmng_type is not CMNG_FNS_MINMAX or link speed is 0.
 */
static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
{
	struct cmng_init_input input;
	memset(&input, 0, sizeof(struct cmng_init_input));

	input.port_rate = bp->link_vars.line_speed;

	if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
		int vn;

		/* read mf conf from shmem */
		if (read_cfg)
			bnx2x_read_mf_cfg(bp);

		/* vn_weight_sum and enable fairness if not 0 */
		bnx2x_calc_vn_min(bp, &input);

		/* calculate and set min-max rate for each vn */
		if (bp->port.pmf)
			for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
				bnx2x_calc_vn_max(bp, vn, &input);

		/* always enable rate shaping and fairness */
		input.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

		bnx2x_init_cmng(&input, &bp->cmng);
		return;
	}

	/* rate shaping and fairness are disabled */
	DP(NETIF_MSG_IFUP,
	   "rate shaping and fairness are disabled\n");
}

/* Copy the per-port cmng struct and each vn's rate-shaping and fairness
 * structures into XSTORM internal memory.
 */
static void storm_memset_cmng(struct bnx2x *bp,
			      struct cmng_init *cmng,
			      u8 port)
{
	int vn;
	size_t size = sizeof(struct cmng_struct_per_port);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);

	__storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);

	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
		int func = func_by_vn(bp, vn);

		addr = BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
		size = sizeof(struct rate_shaping_vars_per_vn);
		__storm_memset_struct(bp, addr, size,
				      (u32 *)&cmng->vnic.vnic_max_rate[vn]);

		addr = BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
		size = sizeof(struct fairness_vars_per_vn);
		__storm_memset_struct(bp, addr, size,
				      (u32 *)&cmng->vnic.vnic_min_rate[vn]);
	}
}

/* init cmng mode in HW according to local configuration */
void bnx2x_set_local_cmng(struct bnx2x *bp)
{
	int cmng_fns = bnx2x_get_cmng_fns_mode(bp);

	if (cmng_fns != CMNG_FNS_NONE) {
		bnx2x_cmng_fns_init(bp, false, cmng_fns);
		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode without fairness\n");
	}
}

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	bnx2x_init_dropless_fc(bp);

	if (bp->link_vars.link_up) {

		if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old mac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* Re-derive cmng values for the new link rate */
	if (bp->link_vars.link_up && bp->link_vars.line_speed)
		bnx2x_set_local_cmng(bp);

	__bnx2x_link_report(bp);

	if (IS_MF(bp))
		bnx2x_link_sync_notify(bp);
}

/* Refresh link status and statistics state. On a PF this also re-reads
 * the DCB configuration; on a VF the link is presented as a fixed 10G
 * full-duplex link since VFs have no direct PHY access.
 */
void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	/* read updated dcb configuration */
	if (IS_PF(bp)) {
		bnx2x_dcbx_pmf_update(bp);
		bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
		if (bp->link_vars.link_up)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
		else
			bnx2x_stats_handle(bp, STATS_EVENT_STOP);
			/* indicate link status */
		bnx2x_link_report(bp);

	} else { /* VF */
		bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
					  SUPPORTED_10baseT_Full |
					  SUPPORTED_100baseT_Half |
					  SUPPORTED_100baseT_Full |
					  SUPPORTED_1000baseT_Full |
					  SUPPORTED_2500baseX_Full |
					  SUPPORTED_10000baseT_Full |
					  SUPPORTED_TP |
					  SUPPORTED_FIBRE |
					  SUPPORTED_Autoneg |
					  SUPPORTED_Pause |
					  SUPPORTED_Asym_Pause);
		bp->port.advertising[0] = bp->port.supported[0];

		bp->link_params.bp = bp;
		bp->link_params.port = BP_PORT(bp);
		bp->link_params.req_duplex[0] = DUPLEX_FULL;
		bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
		bp->link_params.req_line_speed[0] = SPEED_10000;
		bp->link_params.speed_cap_mask[0] = 0x7f0000;
		bp->link_params.switch_cfg = SWITCH_CFG_10G;
		bp->link_vars.mac_type = MAC_TYPE_BMAC;
		bp->link_vars.line_speed = SPEED_10000;
		bp->link_vars.link_status =
			(LINK_STATUS_LINK_UP |
			 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
		bp->link_vars.link_up = 1;
		bp->link_vars.duplex = DUPLEX_FULL;
		bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
		__bnx2x_link_report(bp);

		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}
}

/* Send an AFEX function-update ramrod with the given vif/vlan/priority.
 * If the ramrod cannot be sent, ACK the MCP immediately. Always returns 0.
 */
static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
				  u16 vlan_val, u8 allowed_prio)
{
	struct bnx2x_func_state_params func_params = {NULL};
	struct bnx2x_func_afex_update_params *f_update_params =
		&func_params.params.afex_update;

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;

	/* no need to wait for RAMROD completion, so don't
	 * set RAMROD_COMP_WAIT flag
	 */

	f_update_params->vif_id = vifid;
	f_update_params->afex_default_vlan = vlan_val;
	f_update_params->allowed_priorities = allowed_prio;

	/* if ramrod can not be sent, response to MCP immediately */
	if (bnx2x_func_state_change(bp, &func_params) < 0)
		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);

	return 0;
}

/* Send an AFEX vif-lists ramrod (LIST_SET or LIST_GET). If the ramrod
 * cannot be sent, ACK the MCP immediately with the matching drv_msg_code.
 * Always returns 0.
 */
static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
					  u16 vif_index, u8 func_bit_map)
{
	struct bnx2x_func_state_params func_params = {NULL};
	struct bnx2x_func_afex_viflists_params *update_params =
		&func_params.params.afex_viflists;
	int rc;
	u32 drv_msg_code;

	/* validate only LIST_SET and LIST_GET are received from switch */
	if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
		BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
			  cmd_type);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;

	/* set parameters according to cmd_type */
	update_params->afex_vif_list_command = cmd_type;
	update_params->vif_list_index = vif_index;
	update_params->func_bit_map =
		(cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
	update_params->func_to_clear = 0;
	drv_msg_code =
		(cmd_type == VIF_LIST_RULE_GET) ? DRV_MSG_CODE_AFEX_LISTGET_ACK :
						  DRV_MSG_CODE_AFEX_LISTSET_ACK;

	/* if ramrod can not be sent, respond to MCP immediately for
	 * SET and GET requests (other are not triggered from MCP)
	 */
	rc = bnx2x_func_state_change(bp, &func_params);
	if (rc < 0)
		bnx2x_fw_command(bp, drv_msg_code, 0);

	return 0;
}

/* Dispatch AFEX requests signalled by the MCP in @cmd: vif-list get/set,
 * statistics collection into the MCP scratchpad, and VIF set/unset
 * (which reprograms the rate limit and default VLAN).
 */
static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
{
	struct afex_stats afex_stats;
	u32 func = BP_ABS_FUNC(bp);
	u32 mf_config;
	u16 vlan_val;
	u32 vlan_prio;
	u16 vif_id;
	u8 allowed_prio;
	u8 vlan_mode;
	u32 addr_to_write, vifid, addrs, stats_type, i;

	if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
		vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
		DP(BNX2X_MSG_MCP,
		   "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
		bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
	}

	if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
		vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
		addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
		DP(BNX2X_MSG_MCP,
		   "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
		   vifid, addrs);
		bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
					       addrs);
	}

	if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
		addr_to_write = SHMEM2_RD(bp,
				afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
		stats_type = SHMEM2_RD(bp,
				afex_param1_to_driver[BP_FW_MB_IDX(bp)]);

		DP(BNX2X_MSG_MCP,
		   "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
		   addr_to_write);

		bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);

		/* write response to scratchpad, for MCP */
		for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
			REG_WR(bp, addr_to_write + i*sizeof(u32),
			       *(((u32 *)(&afex_stats))+i));

		/* send ack message to MCP */
		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
	}

	if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
		mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
		bp->mf_config[BP_VN(bp)] = mf_config;
		DP(BNX2X_MSG_MCP,
		   "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
		   mf_config);

		/* if VIF_SET is "enabled" */
		if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
			/* set rate limit directly to internal RAM */
			struct cmng_init_input cmng_input;
			struct rate_shaping_vars_per_vn m_rs_vn;
			size_t size = sizeof(struct rate_shaping_vars_per_vn);
			u32 addr = BAR_XSTRORM_INTMEM +
			    XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));

			bp->mf_config[BP_VN(bp)] = mf_config;

			/* only vnic_max_rate[BP_VN(bp)] of cmng_input is
			 * filled in by bnx2x_calc_vn_max() and consumed below
			 */
			bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
			m_rs_vn.vn_counter.rate =
				cmng_input.vnic_max_rate[BP_VN(bp)];
			m_rs_vn.vn_counter.quota =
				(m_rs_vn.vn_counter.rate *
				 RS_PERIODIC_TIMEOUT_USEC) / 8;

			__storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);

			/* read relevant values from mf_cfg struct in shmem */
			vif_id =
				(MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
				 FUNC_MF_CFG_E1HOV_TAG_MASK) >>
				FUNC_MF_CFG_E1HOV_TAG_SHIFT;
			vlan_val =
				(MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
				 FUNC_MF_CFG_AFEX_VLAN_MASK) >>
				FUNC_MF_CFG_AFEX_VLAN_SHIFT;
			vlan_prio = (mf_config &
				     FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
				    FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
			vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
			vlan_mode =
				(MF_CFG_RD(bp,
					   func_mf_config[func].afex_config) &
				 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
				FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
			allowed_prio =
				(MF_CFG_RD(bp,
					   func_mf_config[func].afex_config) &
				 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
				FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;

			/* send ramrod to FW, return in case of failure */
			if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
						   allowed_prio))
				return;

			bp->afex_def_vlan_tag = vlan_val;
			bp->afex_vlan_mode = vlan_mode;
		} else {
			/* notify link down because BP->flags is disabled */
			bnx2x_link_report(bp);

			/* send INVALID VIF ramrod to FW */
			bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);

			/* Reset the default afex VLAN */
			bp->afex_def_vlan_tag = -1;
		}
	}
}

/* This function becomes the PMF (port management function): mark the
 * flag, kick the periodic task, refresh DCB and enable NIG attentions.
 */
static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);

	/*
	 * We need the mb() to ensure the ordering between the writing to
	 * bp->port.pmf here and reading it from the bnx2x_periodic_task().
	 */
	smp_mb();

	/* queue a periodic task */
	queue_delayed_work(bnx2x_wq, &bp->period_task, 0);

	bnx2x_dcbx_pmf_update(bp);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_VN(bp) + 4)));
	if (bp->common.int_block == INT_BLOCK_HC) {
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	} else if (!CHIP_IS_E1x(bp)) {
		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
	}

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
{
	int mb_idx = BP_FW_MB_IDX(bp);
	u32 seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	/* Serialize mailbox access; the sequence number pairs our request
	 * with the FW's reply.
	 */
	mutex_lock(&bp->fw_mb_mutex);
	seq = ++bp->fw_seq;
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));

	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
			(command | seq), param);

	do {
		/* let the FW do it's magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);

		/* Give the FW up to 5 second (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}

/* Write the E1x common function configuration into TSTORM internal
 * memory for the given absolute function id.
 */
static void storm_memset_func_cfg(struct bnx2x *bp,
				 struct tstorm_eth_function_common_config *tcfg,
				 u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

/* Enable the function in the FW and, when requested, program the slow
 * path queue (SPQ) address and producer.
 */
void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
	if (CHIP_IS_E1x(bp)) {
		struct tstorm_eth_function_common_config tcfg = {0};

		storm_memset_func_cfg(bp, &tcfg, p->func_id);
	}

	/* Enable the function in the FW */
	storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
	storm_memset_func_en(bp, p->func_id, 1);

	/* spq */
	if (p->func_flgs & FUNC_FLG_SPQ) {
		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
		REG_WR(bp, XSEM_REG_FAST_MEMORY +
		       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
	}
}

/**
 * bnx2x_get_common_flags - Return common flags
 *
 * @bp:		device handle
 * @fp:		queue handle
 * @zero_stats:	TRUE if statistics zeroing is needed
 *
 * Return the flags that are common for the Tx-only and not normal connections.
 */
static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
					    struct bnx2x_fastpath *fp,
					    bool zero_stats)
{
	unsigned long flags = 0;

	/* PF driver will always initialize the Queue to an ACTIVE state */
	__set_bit(BNX2X_Q_FLG_ACTIVE, &flags);

	/* tx only connections collect statistics (on the same index as the
	 * parent connection). The statistics are zeroed when the parent
	 * connection is initialized.
	 */

	__set_bit(BNX2X_Q_FLG_STATS, &flags);
	if (zero_stats)
		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);

	__set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
	__set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);

#ifdef BNX2X_STOP_ON_ERROR
	__set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
#endif

	return flags;
}

/* Build the full queue-flags set for a normal (non Tx-only) connection:
 * per-queue TPA/FCoE/leading options on top of the common flags.
 */
static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp,
				       bool leading)
{
	unsigned long flags = 0;

	/* calculate other queue flags */
	if (IS_MF_SD(bp))
		__set_bit(BNX2X_Q_FLG_OV, &flags);

	if (IS_FCOE_FP(fp)) {
		__set_bit(BNX2X_Q_FLG_FCOE, &flags);
		/* For FCoE - force usage of default priority (for afex) */
		__set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
	}

	if (!fp->disable_tpa) {
		__set_bit(BNX2X_Q_FLG_TPA, &flags);
		__set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
		if (fp->mode == TPA_MODE_GRO)
			__set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
	}

	if (leading) {
		__set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
		__set_bit(BNX2X_Q_FLG_MCAST, &flags);
	}

	/* Always set HW VLAN stripping */
	__set_bit(BNX2X_Q_FLG_VLAN, &flags);

	/* configure silent vlan removal */
	if (IS_MF_AFEX(bp))
		__set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);

	return flags | bnx2x_get_common_flags(bp, fp, true);
}

/* Fill the general setup parameters (stat id, client id, MTU, cos) for a
 * queue; FCoE L2 rings always use the mini-jumbo MTU.
 */
static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
	u8 cos)
{
	gen_init->stat_id = bnx2x_stats_id(fp);
	gen_init->spcl_id = fp->cl_id;

	/* Always use mini-jumbo MTU for FCoE L2 ring */
	if (IS_FCOE_FP(fp))
		gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
	else
		gen_init->mtu = bp->dev->mtu;

	gen_init->cos = cos;
}

/* Fill the Rx-queue setup parameters: pause thresholds, TPA/SGE sizing,
 * ring DMA addresses and (for AFEX) silent VLAN removal.
 */
static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
	struct bnx2x_rxq_setup_params *rxq_init)
{
	u8 max_sge = 0;
	u16 sge_sz = 0;
	u16 tpa_agg_size = 0;

	if (!fp->disable_tpa) {
		pause->sge_th_lo = SGE_TH_LO(bp);
		pause->sge_th_hi = SGE_TH_HI(bp);

		/* validate SGE ring has enough to cross high threshold */
		WARN_ON(bp->dropless_fc &&
			pause->sge_th_hi + FW_PREFETCH_CNT >
			MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);

		tpa_agg_size = TPA_AGG_SIZE;
		max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
			SGE_PAGE_SHIFT;
		max_sge = ((max_sge + PAGES_PER_SGE - 1) &
			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
		sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
	}

	/* pause - not for e1 */
	if (!CHIP_IS_E1(bp)) {
		pause->bd_th_lo = BD_TH_LO(bp);
		pause->bd_th_hi = BD_TH_HI(bp);

		pause->rcq_th_lo = RCQ_TH_LO(bp);
		pause->rcq_th_hi = RCQ_TH_HI(bp);
		/*
		 * validate that rings have enough entries to cross
		 * high thresholds
		 */
		WARN_ON(bp->dropless_fc &&
				pause->bd_th_hi + FW_PREFETCH_CNT >
				bp->rx_ring_size);
		WARN_ON(bp->dropless_fc &&
				pause->rcq_th_hi + FW_PREFETCH_CNT >
				NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);

		pause->pri_map = 1;
	}

	/* rxq setup */
	rxq_init->dscr_map = fp->rx_desc_mapping;
	rxq_init->sge_map = fp->rx_sge_mapping;
	rxq_init->rcq_map = fp->rx_comp_mapping;
	rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;

	/* This should be a maximum number of data bytes that may be
	 * placed on the BD (not including paddings).
	 */
	rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
			   BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;

	rxq_init->cl_qzone_id = fp->cl_qzone_id;
	rxq_init->tpa_agg_sz = tpa_agg_size;
	rxq_init->sge_buf_sz = sge_sz;
	rxq_init->max_sges_pkt = max_sge;
	rxq_init->rss_engine_id = BP_FUNC(bp);
	rxq_init->mcast_engine_id = BP_FUNC(bp);

	/* Maximum number or simultaneous TPA aggregation for this Queue.
	 *
	 * For PF Clients it should be the maximum available number.
	 * VF driver(s) may want to define it to a smaller value.
	 */
	rxq_init->max_tpa_queues = MAX_AGG_QS(bp);

	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
	rxq_init->fw_sb_id = fp->fw_sb_id;

	if (IS_FCOE_FP(fp))
		rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
	else
		rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
	/* configure silent vlan removal
	 * if multi function mode is afex, then mask default vlan
	 */
	if (IS_MF_AFEX(bp)) {
		rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
		rxq_init->silent_removal_mask = VLAN_VID_MASK;
	}
}

/* Fill the Tx-queue setup parameters for the given cos; FCoE queues use
 * their dedicated CQ index and traffic type.
 */
static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
	u8 cos)
{
	txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
	txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
	txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
	txq_init->fw_sb_id = fp->fw_sb_id;

	/*
	 * set the tss leading client id for TX classification ==
	 * leading RSS client id
	 */
	txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);

	if (IS_FCOE_FP(fp)) {
		txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
		txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
	}
}

/* One-time PF function init: reset IGU statistics, enable the function
 * in FW, initialize congestion management and the event queue.
 */
static void bnx2x_pf_init(struct bnx2x *bp)
{
	struct bnx2x_func_init_params func_init = {0};
	struct event_ring_data eq_data = { {0} };
	u16 flags;

	if (!CHIP_IS_E1x(bp)) {
		/* reset IGU PF statistics: MSIX + ATTN */
		/* PF */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
		/* ATTN */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   BNX2X_IGU_STAS_MSG_PF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
	}

	/* function setup flags */
	flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);

	/* This flag is relevant for E1x only.
	 * E2 doesn't have a TPA configuration in a function level.
	 */
	flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;

	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = BP_FUNC(bp);
	func_init.spq_map = bp->spq_mapping;
	func_init.spq_prod = bp->spq_prod_idx;

	bnx2x_func_init(bp, &func_init);

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/*
	 * Congestion management values depend on the link rate
	 * There is no active link so initial link rate is set to 10 Gbps.
	 * When the link comes up The congestion management values are
	 * re-calculated according to the actual link rate.
	 */
	bp->link_vars.line_speed = SPEED_10000;
	bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));

	/* Only the PMF sets the HW */
	if (bp->port.pmf)
		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));

	/* init Event Queue - PCI bus guarantees correct endianity*/
	eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
	eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
	eq_data.producer = bp->eq_prod;
	eq_data.index_id = HC_SP_INDEX_EQ_CONS;
	eq_data.sb_id = DEF_SB_ID;
	storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
}

/* Stop Tx and disable this function's LLH in the NIG (E1H flow). */
static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	bnx2x_tx_disable(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
}

/* Re-enable this function's LLH in the NIG and wake the Tx queues. */
static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Tx queue should be only re-enabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}

#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED	3

static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
{
	struct eth_stats_info *ether_stat =
		&bp->slowpath->drv_info_to_mcp.ether_stat;
	struct bnx2x_vlan_mac_obj *mac_obj =
		&bp->sp_objs->mac_obj;
	int i;

	strlcpy(ether_stat->version, DRV_MODULE_VERSION,
		ETH_STAT_INFO_VERSION_LEN);

	/* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
	 * mac_local field in ether_stat struct. The base address is offset by 2
	 * bytes to account for the field being 8 bytes but a mac address is
	 * only 6 bytes.
Likewise, the stride for the get_n_elements function is 3285 * 2 bytes to compensate from the 6 bytes of a mac to the 8 bytes 3286 * allocated by the ether_stat struct, so the macs will land in their 3287 * proper positions. 3288 */ 3289 for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++) 3290 memset(ether_stat->mac_local + i, 0, 3291 sizeof(ether_stat->mac_local[0])); 3292 mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj, 3293 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, 3294 ether_stat->mac_local + MAC_PAD, MAC_PAD, 3295 ETH_ALEN); 3296 ether_stat->mtu_size = bp->dev->mtu; 3297 if (bp->dev->features & NETIF_F_RXCSUM) 3298 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; 3299 if (bp->dev->features & NETIF_F_TSO) 3300 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK; 3301 ether_stat->feature_flags |= bp->common.boot_mode; 3302 3303 ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0; 3304 3305 ether_stat->txq_size = bp->tx_ring_size; 3306 ether_stat->rxq_size = bp->rx_ring_size; 3307 3308#ifdef CONFIG_BNX2X_SRIOV 3309 ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0; 3310#endif 3311} 3312 3313static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp) 3314{ 3315 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; 3316 struct fcoe_stats_info *fcoe_stat = 3317 &bp->slowpath->drv_info_to_mcp.fcoe_stat; 3318 3319 if (!CNIC_LOADED(bp)) 3320 return; 3321 3322 memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN); 3323 3324 fcoe_stat->qos_priority = 3325 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE]; 3326 3327 /* insert FCoE stats from ramrod response */ 3328 if (!NO_FCOE(bp)) { 3329 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats = 3330 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. 3331 tstorm_queue_statistics; 3332 3333 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats = 3334 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. 
3335 xstorm_queue_statistics; 3336 3337 struct fcoe_statistics_params *fw_fcoe_stat = 3338 &bp->fw_stats_data->fcoe; 3339 3340 ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0, 3341 fcoe_stat->rx_bytes_lo, 3342 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt); 3343 3344 ADD_64_LE(fcoe_stat->rx_bytes_hi, 3345 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi, 3346 fcoe_stat->rx_bytes_lo, 3347 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo); 3348 3349 ADD_64_LE(fcoe_stat->rx_bytes_hi, 3350 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi, 3351 fcoe_stat->rx_bytes_lo, 3352 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo); 3353 3354 ADD_64_LE(fcoe_stat->rx_bytes_hi, 3355 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi, 3356 fcoe_stat->rx_bytes_lo, 3357 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo); 3358 3359 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, 3360 fcoe_stat->rx_frames_lo, 3361 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt); 3362 3363 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, 3364 fcoe_stat->rx_frames_lo, 3365 fcoe_q_tstorm_stats->rcv_ucast_pkts); 3366 3367 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, 3368 fcoe_stat->rx_frames_lo, 3369 fcoe_q_tstorm_stats->rcv_bcast_pkts); 3370 3371 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0, 3372 fcoe_stat->rx_frames_lo, 3373 fcoe_q_tstorm_stats->rcv_mcast_pkts); 3374 3375 ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0, 3376 fcoe_stat->tx_bytes_lo, 3377 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt); 3378 3379 ADD_64_LE(fcoe_stat->tx_bytes_hi, 3380 fcoe_q_xstorm_stats->ucast_bytes_sent.hi, 3381 fcoe_stat->tx_bytes_lo, 3382 fcoe_q_xstorm_stats->ucast_bytes_sent.lo); 3383 3384 ADD_64_LE(fcoe_stat->tx_bytes_hi, 3385 fcoe_q_xstorm_stats->bcast_bytes_sent.hi, 3386 fcoe_stat->tx_bytes_lo, 3387 fcoe_q_xstorm_stats->bcast_bytes_sent.lo); 3388 3389 ADD_64_LE(fcoe_stat->tx_bytes_hi, 3390 fcoe_q_xstorm_stats->mcast_bytes_sent.hi, 3391 fcoe_stat->tx_bytes_lo, 3392 fcoe_q_xstorm_stats->mcast_bytes_sent.lo); 3393 3394 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, 3395 fcoe_stat->tx_frames_lo, 3396 
fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt); 3397 3398 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, 3399 fcoe_stat->tx_frames_lo, 3400 fcoe_q_xstorm_stats->ucast_pkts_sent); 3401 3402 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, 3403 fcoe_stat->tx_frames_lo, 3404 fcoe_q_xstorm_stats->bcast_pkts_sent); 3405 3406 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0, 3407 fcoe_stat->tx_frames_lo, 3408 fcoe_q_xstorm_stats->mcast_pkts_sent); 3409 } 3410 3411 /* ask L5 driver to add data to the struct */ 3412 bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD); 3413} 3414 3415static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp) 3416{ 3417 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; 3418 struct iscsi_stats_info *iscsi_stat = 3419 &bp->slowpath->drv_info_to_mcp.iscsi_stat; 3420 3421 if (!CNIC_LOADED(bp)) 3422 return; 3423 3424 memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac, 3425 ETH_ALEN); 3426 3427 iscsi_stat->qos_priority = 3428 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI]; 3429 3430 /* ask L5 driver to add data to the struct */ 3431 bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD); 3432} 3433 3434/* called due to MCP event (on pmf): 3435 * reread new bandwidth configuration 3436 * configure FW 3437 * notify others function about the change 3438 */ 3439static void bnx2x_config_mf_bw(struct bnx2x *bp) 3440{ 3441 if (bp->link_vars.link_up) { 3442 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX); 3443 bnx2x_link_sync_notify(bp); 3444 } 3445 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); 3446} 3447 3448static void bnx2x_set_mf_bw(struct bnx2x *bp) 3449{ 3450 bnx2x_config_mf_bw(bp); 3451 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0); 3452} 3453 3454static void bnx2x_handle_eee_event(struct bnx2x *bp) 3455{ 3456 DP(BNX2X_MSG_MCP, "EEE - LLDP event\n"); 3457 bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); 3458} 3459 3460static void bnx2x_handle_drv_info_req(struct bnx2x *bp) 3461{ 3462 enum drv_info_opcode op_code; 3463 u32 drv_info_ctl 
= SHMEM2_RD(bp, drv_info_control); 3464 3465 /* if drv_info version supported by MFW doesn't match - send NACK */ 3466 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { 3467 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); 3468 return; 3469 } 3470 3471 op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> 3472 DRV_INFO_CONTROL_OP_CODE_SHIFT; 3473 3474 memset(&bp->slowpath->drv_info_to_mcp, 0, 3475 sizeof(union drv_info_to_mcp)); 3476 3477 switch (op_code) { 3478 case ETH_STATS_OPCODE: 3479 bnx2x_drv_info_ether_stat(bp); 3480 break; 3481 case FCOE_STATS_OPCODE: 3482 bnx2x_drv_info_fcoe_stat(bp); 3483 break; 3484 case ISCSI_STATS_OPCODE: 3485 bnx2x_drv_info_iscsi_stat(bp); 3486 break; 3487 default: 3488 /* if op code isn't supported - send NACK */ 3489 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); 3490 return; 3491 } 3492 3493 /* if we got drv_info attn from MFW then these fields are defined in 3494 * shmem2 for sure 3495 */ 3496 SHMEM2_WR(bp, drv_info_host_addr_lo, 3497 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp))); 3498 SHMEM2_WR(bp, drv_info_host_addr_hi, 3499 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp))); 3500 3501 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0); 3502} 3503 3504static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) 3505{ 3506 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event); 3507 3508 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { 3509 3510 /* 3511 * This is the only place besides the function initialization 3512 * where the bp->flags can change so it is done without any 3513 * locks 3514 */ 3515 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) { 3516 DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n"); 3517 bp->flags |= MF_FUNC_DIS; 3518 3519 bnx2x_e1h_disable(bp); 3520 } else { 3521 DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n"); 3522 bp->flags &= ~MF_FUNC_DIS; 3523 3524 bnx2x_e1h_enable(bp); 3525 } 3526 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; 3527 } 3528 if (dcc_event & 
DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { 3529 bnx2x_config_mf_bw(bp); 3530 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; 3531 } 3532 3533 /* Report results to MCP */ 3534 if (dcc_event) 3535 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0); 3536 else 3537 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0); 3538} 3539 3540/* must be called under the spq lock */ 3541static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp) 3542{ 3543 struct eth_spe *next_spe = bp->spq_prod_bd; 3544 3545 if (bp->spq_prod_bd == bp->spq_last_bd) { 3546 bp->spq_prod_bd = bp->spq; 3547 bp->spq_prod_idx = 0; 3548 DP(BNX2X_MSG_SP, "end of spq\n"); 3549 } else { 3550 bp->spq_prod_bd++; 3551 bp->spq_prod_idx++; 3552 } 3553 return next_spe; 3554} 3555 3556/* must be called under the spq lock */ 3557static void bnx2x_sp_prod_update(struct bnx2x *bp) 3558{ 3559 int func = BP_FUNC(bp); 3560 3561 /* 3562 * Make sure that BD data is updated before writing the producer: 3563 * BD data is written to the memory, the producer is read from the 3564 * memory, thus we need a full memory barrier to ensure the ordering. 
3565 */ 3566 mb(); 3567 3568 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func), 3569 bp->spq_prod_idx); 3570 mmiowb(); 3571} 3572 3573/** 3574 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ 3575 * 3576 * @cmd: command to check 3577 * @cmd_type: command type 3578 */ 3579static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type) 3580{ 3581 if ((cmd_type == NONE_CONNECTION_TYPE) || 3582 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) || 3583 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) || 3584 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) || 3585 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) || 3586 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) || 3587 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) 3588 return true; 3589 else 3590 return false; 3591} 3592 3593/** 3594 * bnx2x_sp_post - place a single command on an SP ring 3595 * 3596 * @bp: driver handle 3597 * @command: command to place (e.g. SETUP, FILTER_RULES, etc.) 3598 * @cid: SW CID the command is related to 3599 * @data_hi: command private data address (high 32 bits) 3600 * @data_lo: command private data address (low 32 bits) 3601 * @cmd_type: command type (e.g. NONE, ETH) 3602 * 3603 * SP data is handled as if it's always an address pair, thus data fields are 3604 * not swapped to little endian in upper functions. Instead this function swaps 3605 * data as if it's two u32 fields. 3606 */ 3607int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, 3608 u32 data_hi, u32 data_lo, int cmd_type) 3609{ 3610 struct eth_spe *spe; 3611 u16 type; 3612 bool common = bnx2x_is_contextless_ramrod(command, cmd_type); 3613 3614#ifdef BNX2X_STOP_ON_ERROR 3615 if (unlikely(bp->panic)) { 3616 BNX2X_ERR("Can't post SP when there is panic\n"); 3617 return -EIO; 3618 } 3619#endif 3620 3621 spin_lock_bh(&bp->spq_lock); 3622 3623 if (common) { 3624 if (!atomic_read(&bp->eq_spq_left)) { 3625 BNX2X_ERR("BUG! 
EQ ring full!\n"); 3626 spin_unlock_bh(&bp->spq_lock); 3627 bnx2x_panic(); 3628 return -EBUSY; 3629 } 3630 } else if (!atomic_read(&bp->cq_spq_left)) { 3631 BNX2X_ERR("BUG! SPQ ring full!\n"); 3632 spin_unlock_bh(&bp->spq_lock); 3633 bnx2x_panic(); 3634 return -EBUSY; 3635 } 3636 3637 spe = bnx2x_sp_get_next(bp); 3638 3639 /* CID needs port number to be encoded int it */ 3640 spe->hdr.conn_and_cmd_data = 3641 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) | 3642 HW_CID(bp, cid)); 3643 3644 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; 3645 3646 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) & 3647 SPE_HDR_FUNCTION_ID); 3648 3649 spe->hdr.type = cpu_to_le16(type); 3650 3651 spe->data.update_data_addr.hi = cpu_to_le32(data_hi); 3652 spe->data.update_data_addr.lo = cpu_to_le32(data_lo); 3653 3654 /* 3655 * It's ok if the actual decrement is issued towards the memory 3656 * somewhere between the spin_lock and spin_unlock. Thus no 3657 * more explicit memory barrier is needed. 3658 */ 3659 if (common) 3660 atomic_dec(&bp->eq_spq_left); 3661 else 3662 atomic_dec(&bp->cq_spq_left); 3663 3664 DP(BNX2X_MSG_SP, 3665 "SPQE[%x] (%x:%x) (cmd, common?) 
(%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n", 3666 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), 3667 (u32)(U64_LO(bp->spq_mapping) + 3668 (void *)bp->spq_prod_bd - (void *)bp->spq), command, common, 3669 HW_CID(bp, cid), data_hi, data_lo, type, 3670 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left)); 3671 3672 bnx2x_sp_prod_update(bp); 3673 spin_unlock_bh(&bp->spq_lock); 3674 return 0; 3675} 3676 3677/* acquire split MCP access lock register */ 3678static int bnx2x_acquire_alr(struct bnx2x *bp) 3679{ 3680 u32 j, val; 3681 int rc = 0; 3682 3683 might_sleep(); 3684 for (j = 0; j < 1000; j++) { 3685 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK); 3686 val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK); 3687 if (val & MCPR_ACCESS_LOCK_LOCK) 3688 break; 3689 3690 usleep_range(5000, 10000); 3691 } 3692 if (!(val & MCPR_ACCESS_LOCK_LOCK)) { 3693 BNX2X_ERR("Cannot acquire MCP access lock register\n"); 3694 rc = -EBUSY; 3695 } 3696 3697 return rc; 3698} 3699 3700/* release split MCP access lock register */ 3701static void bnx2x_release_alr(struct bnx2x *bp) 3702{ 3703 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0); 3704} 3705 3706#define BNX2X_DEF_SB_ATT_IDX 0x0001 3707#define BNX2X_DEF_SB_IDX 0x0002 3708 3709static u16 bnx2x_update_dsb_idx(struct bnx2x *bp) 3710{ 3711 struct host_sp_status_block *def_sb = bp->def_status_blk; 3712 u16 rc = 0; 3713 3714 barrier(); /* status block is written to by the chip */ 3715 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) { 3716 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index; 3717 rc |= BNX2X_DEF_SB_ATT_IDX; 3718 } 3719 3720 if (bp->def_idx != def_sb->sp_sb.running_index) { 3721 bp->def_idx = def_sb->sp_sb.running_index; 3722 rc |= BNX2X_DEF_SB_IDX; 3723 } 3724 3725 /* Do not reorder: indices reading should complete before handling */ 3726 barrier(); 3727 return rc; 3728} 3729 3730/* 3731 * slow path service functions 3732 */ 3733 3734static void 
bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) 3735{ 3736 int port = BP_PORT(bp); 3737 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 3738 MISC_REG_AEU_MASK_ATTN_FUNC_0; 3739 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : 3740 NIG_REG_MASK_INTERRUPT_PORT0; 3741 u32 aeu_mask; 3742 u32 nig_mask = 0; 3743 u32 reg_addr; 3744 3745 if (bp->attn_state & asserted) 3746 BNX2X_ERR("IGU ERROR\n"); 3747 3748 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 3749 aeu_mask = REG_RD(bp, aeu_addr); 3750 3751 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n", 3752 aeu_mask, asserted); 3753 aeu_mask &= ~(asserted & 0x3ff); 3754 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); 3755 3756 REG_WR(bp, aeu_addr, aeu_mask); 3757 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 3758 3759 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); 3760 bp->attn_state |= asserted; 3761 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); 3762 3763 if (asserted & ATTN_HARD_WIRED_MASK) { 3764 if (asserted & ATTN_NIG_FOR_FUNC) { 3765 3766 bnx2x_acquire_phy_lock(bp); 3767 3768 /* save nig interrupt mask */ 3769 nig_mask = REG_RD(bp, nig_int_mask_addr); 3770 3771 /* If nig_mask is not set, no need to call the update 3772 * function. 3773 */ 3774 if (nig_mask) { 3775 REG_WR(bp, nig_int_mask_addr, 0); 3776 3777 bnx2x_link_attn(bp); 3778 } 3779 3780 /* handle unicore attn? 
*/ 3781 } 3782 if (asserted & ATTN_SW_TIMER_4_FUNC) 3783 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n"); 3784 3785 if (asserted & GPIO_2_FUNC) 3786 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n"); 3787 3788 if (asserted & GPIO_3_FUNC) 3789 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n"); 3790 3791 if (asserted & GPIO_4_FUNC) 3792 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n"); 3793 3794 if (port == 0) { 3795 if (asserted & ATTN_GENERAL_ATTN_1) { 3796 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n"); 3797 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); 3798 } 3799 if (asserted & ATTN_GENERAL_ATTN_2) { 3800 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n"); 3801 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); 3802 } 3803 if (asserted & ATTN_GENERAL_ATTN_3) { 3804 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n"); 3805 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); 3806 } 3807 } else { 3808 if (asserted & ATTN_GENERAL_ATTN_4) { 3809 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n"); 3810 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); 3811 } 3812 if (asserted & ATTN_GENERAL_ATTN_5) { 3813 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n"); 3814 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); 3815 } 3816 if (asserted & ATTN_GENERAL_ATTN_6) { 3817 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n"); 3818 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); 3819 } 3820 } 3821 3822 } /* if hardwired */ 3823 3824 if (bp->common.int_block == INT_BLOCK_HC) 3825 reg_addr = (HC_REG_COMMAND_REG + port*32 + 3826 COMMAND_REG_ATTN_BITS_SET); 3827 else 3828 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8); 3829 3830 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted, 3831 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 3832 REG_WR(bp, reg_addr, asserted); 3833 3834 /* now set back the mask */ 3835 if (asserted & ATTN_NIG_FOR_FUNC) { 3836 /* Verify that IGU ack through BAR was written before restoring 3837 * NIG mask. This loop should exit after 2-3 iterations max. 
3838 */ 3839 if (bp->common.int_block != INT_BLOCK_HC) { 3840 u32 cnt = 0, igu_acked; 3841 do { 3842 igu_acked = REG_RD(bp, 3843 IGU_REG_ATTENTION_ACK_BITS); 3844 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) && 3845 (++cnt < MAX_IGU_ATTN_ACK_TO)); 3846 if (!igu_acked) 3847 DP(NETIF_MSG_HW, 3848 "Failed to verify IGU ack on time\n"); 3849 barrier(); 3850 } 3851 REG_WR(bp, nig_int_mask_addr, nig_mask); 3852 bnx2x_release_phy_lock(bp); 3853 } 3854} 3855 3856static void bnx2x_fan_failure(struct bnx2x *bp) 3857{ 3858 int port = BP_PORT(bp); 3859 u32 ext_phy_config; 3860 /* mark the failure */ 3861 ext_phy_config = 3862 SHMEM_RD(bp, 3863 dev_info.port_hw_config[port].external_phy_config); 3864 3865 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; 3866 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; 3867 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config, 3868 ext_phy_config); 3869 3870 /* log the failure */ 3871 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n" 3872 "Please contact OEM Support for assistance\n"); 3873 3874 /* Schedule device reset (unload) 3875 * This is due to some boards consuming sufficient power when driver is 3876 * up to overheat if fan fails. 3877 */ 3878 smp_mb__before_clear_bit(); 3879 set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state); 3880 smp_mb__after_clear_bit(); 3881 schedule_delayed_work(&bp->sp_rtnl_task, 0); 3882} 3883 3884static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) 3885{ 3886 int port = BP_PORT(bp); 3887 int reg_offset; 3888 u32 val; 3889 3890 reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 3891 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 3892 3893 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { 3894 3895 val = REG_RD(bp, reg_offset); 3896 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; 3897 REG_WR(bp, reg_offset, val); 3898 3899 BNX2X_ERR("SPIO5 hw attention\n"); 3900 3901 /* Fan failure attention */ 3902 bnx2x_hw_reset_phy(&bp->link_params); 3903 bnx2x_fan_failure(bp); 3904 } 3905 3906 if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) { 3907 bnx2x_acquire_phy_lock(bp); 3908 bnx2x_handle_module_detect_int(&bp->link_params); 3909 bnx2x_release_phy_lock(bp); 3910 } 3911 3912 if (attn & HW_INTERRUT_ASSERT_SET_0) { 3913 3914 val = REG_RD(bp, reg_offset); 3915 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); 3916 REG_WR(bp, reg_offset, val); 3917 3918 BNX2X_ERR("FATAL HW block attention set0 0x%x\n", 3919 (u32)(attn & HW_INTERRUT_ASSERT_SET_0)); 3920 bnx2x_panic(); 3921 } 3922} 3923 3924static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) 3925{ 3926 u32 val; 3927 3928 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { 3929 3930 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR); 3931 BNX2X_ERR("DB hw attention 0x%x\n", val); 3932 /* DORQ discard attention */ 3933 if (val & 0x2) 3934 BNX2X_ERR("FATAL error from DORQ\n"); 3935 } 3936 3937 if (attn & HW_INTERRUT_ASSERT_SET_1) { 3938 3939 int port = BP_PORT(bp); 3940 int reg_offset; 3941 3942 reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : 3943 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); 3944 3945 val = REG_RD(bp, reg_offset); 3946 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); 3947 REG_WR(bp, reg_offset, val); 3948 3949 BNX2X_ERR("FATAL HW block attention set1 0x%x\n", 3950 (u32)(attn & HW_INTERRUT_ASSERT_SET_1)); 3951 bnx2x_panic(); 3952 } 3953} 3954 3955static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) 3956{ 3957 u32 val; 3958 3959 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { 3960 3961 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR); 3962 BNX2X_ERR("CFC hw attention 0x%x\n", val); 3963 /* CFC error attention */ 3964 if (val & 0x2) 3965 BNX2X_ERR("FATAL error from CFC\n"); 3966 } 3967 3968 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { 3969 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0); 3970 BNX2X_ERR("PXP hw attention-0 0x%x\n", val); 3971 /* RQ_USDMDP_FIFO_OVERFLOW */ 3972 if (val & 0x18000) 3973 BNX2X_ERR("FATAL error from PXP\n"); 3974 3975 if (!CHIP_IS_E1x(bp)) { 3976 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1); 3977 BNX2X_ERR("PXP hw attention-1 0x%x\n", val); 3978 } 3979 } 3980 3981 if (attn & HW_INTERRUT_ASSERT_SET_2) { 3982 3983 int port = BP_PORT(bp); 3984 int reg_offset; 3985 3986 reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : 3987 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); 3988 3989 val = REG_RD(bp, reg_offset); 3990 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); 3991 REG_WR(bp, reg_offset, val); 3992 3993 BNX2X_ERR("FATAL HW block attention set2 0x%x\n", 3994 (u32)(attn & HW_INTERRUT_ASSERT_SET_2)); 3995 bnx2x_panic(); 3996 } 3997} 3998 3999static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) 4000{ 4001 u32 val; 4002 4003 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { 4004 4005 if (attn & BNX2X_PMF_LINK_ASSERT) { 4006 int func = BP_FUNC(bp); 4007 4008 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 4009 bnx2x_read_mf_cfg(bp); 4010 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp, 4011 func_mf_config[BP_ABS_FUNC(bp)].config); 4012 val = SHMEM_RD(bp, 4013 func_mb[BP_FW_MB_IDX(bp)].drv_status); 4014 if (val & DRV_STATUS_DCC_EVENT_MASK) 4015 bnx2x_dcc_event(bp, 4016 (val & DRV_STATUS_DCC_EVENT_MASK)); 4017 4018 if (val & DRV_STATUS_SET_MF_BW) 4019 bnx2x_set_mf_bw(bp); 4020 4021 if (val & DRV_STATUS_DRV_INFO_REQ) 4022 bnx2x_handle_drv_info_req(bp); 4023 4024 if (val & DRV_STATUS_VF_DISABLED) 4025 bnx2x_vf_handle_flr_event(bp); 4026 4027 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) 4028 bnx2x_pmf_update(bp); 4029 4030 if (bp->port.pmf && 4031 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) && 4032 bp->dcbx_enabled > 0) 4033 /* start dcbx state machine */ 4034 bnx2x_dcbx_set_params(bp, 4035 BNX2X_DCBX_STATE_NEG_RECEIVED); 4036 if (val & DRV_STATUS_AFEX_EVENT_MASK) 4037 bnx2x_handle_afex_cmd(bp, 4038 val & DRV_STATUS_AFEX_EVENT_MASK); 4039 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS) 4040 bnx2x_handle_eee_event(bp); 4041 if (bp->link_vars.periodic_flags & 4042 PERIODIC_FLAGS_LINK_EVENT) { 4043 /* sync with link */ 4044 bnx2x_acquire_phy_lock(bp); 4045 bp->link_vars.periodic_flags &= 4046 ~PERIODIC_FLAGS_LINK_EVENT; 4047 bnx2x_release_phy_lock(bp); 4048 if (IS_MF(bp)) 4049 bnx2x_link_sync_notify(bp); 4050 bnx2x_link_report(bp); 4051 } 4052 /* Always call it 
here: bnx2x_link_report() will 4053 * prevent the link indication duplication. 4054 */ 4055 bnx2x__link_status_update(bp); 4056 } else if (attn & BNX2X_MC_ASSERT_BITS) { 4057 4058 BNX2X_ERR("MC assert!\n"); 4059 bnx2x_mc_assert(bp); 4060 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0); 4061 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0); 4062 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0); 4063 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0); 4064 bnx2x_panic(); 4065 4066 } else if (attn & BNX2X_MCP_ASSERT) { 4067 4068 BNX2X_ERR("MCP assert!\n"); 4069 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0); 4070 bnx2x_fw_dump(bp); 4071 4072 } else 4073 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn); 4074 } 4075 4076 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { 4077 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn); 4078 if (attn & BNX2X_GRC_TIMEOUT) { 4079 val = CHIP_IS_E1(bp) ? 0 : 4080 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN); 4081 BNX2X_ERR("GRC time-out 0x%08x\n", val); 4082 } 4083 if (attn & BNX2X_GRC_RSV) { 4084 val = CHIP_IS_E1(bp) ? 0 : 4085 REG_RD(bp, MISC_REG_GRC_RSV_ATTN); 4086 BNX2X_ERR("GRC reserved 0x%08x\n", val); 4087 } 4088 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); 4089 } 4090} 4091 4092/* 4093 * Bits map: 4094 * 0-7 - Engine0 load counter. 4095 * 8-15 - Engine1 load counter. 4096 * 16 - Engine0 RESET_IN_PROGRESS bit. 4097 * 17 - Engine1 RESET_IN_PROGRESS bit. 4098 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active function 4099 * on the engine 4100 * 19 - Engine1 ONE_IS_LOADED. 4101 * 20 - Chip reset flow bit. When set none-leader must wait for both engines 4102 * leader to complete (check for both RESET_IN_PROGRESS bits and not for 4103 * just the one belonging to its engine). 
4104 * 4105 */ 4106#define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1 4107 4108#define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff 4109#define BNX2X_PATH0_LOAD_CNT_SHIFT 0 4110#define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00 4111#define BNX2X_PATH1_LOAD_CNT_SHIFT 8 4112#define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000 4113#define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000 4114#define BNX2X_GLOBAL_RESET_BIT 0x00040000 4115 4116/* 4117 * Set the GLOBAL_RESET bit. 4118 * 4119 * Should be run under rtnl lock 4120 */ 4121void bnx2x_set_reset_global(struct bnx2x *bp) 4122{ 4123 u32 val; 4124 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4125 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4126 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT); 4127 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4128} 4129 4130/* 4131 * Clear the GLOBAL_RESET bit. 4132 * 4133 * Should be run under rtnl lock 4134 */ 4135static void bnx2x_clear_reset_global(struct bnx2x *bp) 4136{ 4137 u32 val; 4138 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4139 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4140 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT)); 4141 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4142} 4143 4144/* 4145 * Checks the GLOBAL_RESET bit. 4146 * 4147 * should be run under rtnl lock 4148 */ 4149static bool bnx2x_reset_is_global(struct bnx2x *bp) 4150{ 4151 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4152 4153 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val); 4154 return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false; 4155} 4156 4157/* 4158 * Clear RESET_IN_PROGRESS bit for the current engine. 4159 * 4160 * Should be run under rtnl lock 4161 */ 4162static void bnx2x_set_reset_done(struct bnx2x *bp) 4163{ 4164 u32 val; 4165 u32 bit = BP_PATH(bp) ? 
4166 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; 4167 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4168 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4169 4170 /* Clear the bit */ 4171 val &= ~bit; 4172 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); 4173 4174 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4175} 4176 4177/* 4178 * Set RESET_IN_PROGRESS for the current engine. 4179 * 4180 * should be run under rtnl lock 4181 */ 4182void bnx2x_set_reset_in_progress(struct bnx2x *bp) 4183{ 4184 u32 val; 4185 u32 bit = BP_PATH(bp) ? 4186 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; 4187 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4188 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4189 4190 /* Set the bit */ 4191 val |= bit; 4192 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); 4193 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4194} 4195 4196/* 4197 * Checks the RESET_IN_PROGRESS bit for the given engine. 4198 * should be run under rtnl lock 4199 */ 4200bool bnx2x_reset_is_done(struct bnx2x *bp, int engine) 4201{ 4202 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4203 u32 bit = engine ? 4204 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; 4205 4206 /* return false if bit is set */ 4207 return (val & bit) ? false : true; 4208} 4209 4210/* 4211 * set pf load for the current pf. 4212 * 4213 * should be run under rtnl lock 4214 */ 4215void bnx2x_set_pf_load(struct bnx2x *bp) 4216{ 4217 u32 val1, val; 4218 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : 4219 BNX2X_PATH0_LOAD_CNT_MASK; 4220 u32 shift = BP_PATH(bp) ? 
BNX2X_PATH1_LOAD_CNT_SHIFT : 4221 BNX2X_PATH0_LOAD_CNT_SHIFT; 4222 4223 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4224 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4225 4226 DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val); 4227 4228 /* get the current counter value */ 4229 val1 = (val & mask) >> shift; 4230 4231 /* set bit of that PF */ 4232 val1 |= (1 << bp->pf_num); 4233 4234 /* clear the old value */ 4235 val &= ~mask; 4236 4237 /* set the new one */ 4238 val |= ((val1 << shift) & mask); 4239 4240 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); 4241 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4242} 4243 4244/** 4245 * bnx2x_clear_pf_load - clear pf load mark 4246 * 4247 * @bp: driver handle 4248 * 4249 * Should be run under rtnl lock. 4250 * Decrements the load counter for the current engine. Returns 4251 * whether other functions are still loaded 4252 */ 4253bool bnx2x_clear_pf_load(struct bnx2x *bp) 4254{ 4255 u32 val1, val; 4256 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : 4257 BNX2X_PATH0_LOAD_CNT_MASK; 4258 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : 4259 BNX2X_PATH0_LOAD_CNT_SHIFT; 4260 4261 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4262 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4263 DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val); 4264 4265 /* get the current counter value */ 4266 val1 = (val & mask) >> shift; 4267 4268 /* clear bit of that PF */ 4269 val1 &= ~(1 << bp->pf_num); 4270 4271 /* clear the old value */ 4272 val &= ~mask; 4273 4274 /* set the new one */ 4275 val |= ((val1 << shift) & mask); 4276 4277 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); 4278 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4279 return val1 != 0; 4280} 4281 4282/* 4283 * Read the load status for the current engine. 4284 * 4285 * should be run under rtnl lock 4286 */ 4287static bool bnx2x_get_load_status(struct bnx2x *bp, int engine) 4288{ 4289 u32 mask = (engine ? 
BNX2X_PATH1_LOAD_CNT_MASK : 4290 BNX2X_PATH0_LOAD_CNT_MASK); 4291 u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT : 4292 BNX2X_PATH0_LOAD_CNT_SHIFT); 4293 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 4294 4295 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val); 4296 4297 val = (val & mask) >> shift; 4298 4299 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n", 4300 engine, val); 4301 4302 return val != 0; 4303} 4304 4305static void _print_parity(struct bnx2x *bp, u32 reg) 4306{ 4307 pr_cont(" [0x%08x] ", REG_RD(bp, reg)); 4308} 4309 4310static void _print_next_block(int idx, const char *blk) 4311{ 4312 pr_cont("%s%s", idx ? ", " : "", blk); 4313} 4314 4315static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig, 4316 int *par_num, bool print) 4317{ 4318 u32 cur_bit; 4319 bool res; 4320 int i; 4321 4322 res = false; 4323 4324 for (i = 0; sig; i++) { 4325 cur_bit = (0x1UL << i); 4326 if (sig & cur_bit) { 4327 res |= true; /* Each bit is real error! 
*/ 4328 4329 if (print) { 4330 switch (cur_bit) { 4331 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: 4332 _print_next_block((*par_num)++, "BRB"); 4333 _print_parity(bp, 4334 BRB1_REG_BRB1_PRTY_STS); 4335 break; 4336 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: 4337 _print_next_block((*par_num)++, 4338 "PARSER"); 4339 _print_parity(bp, PRS_REG_PRS_PRTY_STS); 4340 break; 4341 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: 4342 _print_next_block((*par_num)++, "TSDM"); 4343 _print_parity(bp, 4344 TSDM_REG_TSDM_PRTY_STS); 4345 break; 4346 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: 4347 _print_next_block((*par_num)++, 4348 "SEARCHER"); 4349 _print_parity(bp, SRC_REG_SRC_PRTY_STS); 4350 break; 4351 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: 4352 _print_next_block((*par_num)++, "TCM"); 4353 _print_parity(bp, TCM_REG_TCM_PRTY_STS); 4354 break; 4355 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: 4356 _print_next_block((*par_num)++, 4357 "TSEMI"); 4358 _print_parity(bp, 4359 TSEM_REG_TSEM_PRTY_STS_0); 4360 _print_parity(bp, 4361 TSEM_REG_TSEM_PRTY_STS_1); 4362 break; 4363 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: 4364 _print_next_block((*par_num)++, "XPB"); 4365 _print_parity(bp, GRCBASE_XPB + 4366 PB_REG_PB_PRTY_STS); 4367 break; 4368 } 4369 } 4370 4371 /* Clear the bit */ 4372 sig &= ~cur_bit; 4373 } 4374 } 4375 4376 return res; 4377} 4378 4379static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, 4380 int *par_num, bool *global, 4381 bool print) 4382{ 4383 u32 cur_bit; 4384 bool res; 4385 int i; 4386 4387 res = false; 4388 4389 for (i = 0; sig; i++) { 4390 cur_bit = (0x1UL << i); 4391 if (sig & cur_bit) { 4392 res |= true; /* Each bit is real error! 
*/ 4393 switch (cur_bit) { 4394 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: 4395 if (print) { 4396 _print_next_block((*par_num)++, "PBF"); 4397 _print_parity(bp, PBF_REG_PBF_PRTY_STS); 4398 } 4399 break; 4400 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: 4401 if (print) { 4402 _print_next_block((*par_num)++, "QM"); 4403 _print_parity(bp, QM_REG_QM_PRTY_STS); 4404 } 4405 break; 4406 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: 4407 if (print) { 4408 _print_next_block((*par_num)++, "TM"); 4409 _print_parity(bp, TM_REG_TM_PRTY_STS); 4410 } 4411 break; 4412 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: 4413 if (print) { 4414 _print_next_block((*par_num)++, "XSDM"); 4415 _print_parity(bp, 4416 XSDM_REG_XSDM_PRTY_STS); 4417 } 4418 break; 4419 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: 4420 if (print) { 4421 _print_next_block((*par_num)++, "XCM"); 4422 _print_parity(bp, XCM_REG_XCM_PRTY_STS); 4423 } 4424 break; 4425 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: 4426 if (print) { 4427 _print_next_block((*par_num)++, 4428 "XSEMI"); 4429 _print_parity(bp, 4430 XSEM_REG_XSEM_PRTY_STS_0); 4431 _print_parity(bp, 4432 XSEM_REG_XSEM_PRTY_STS_1); 4433 } 4434 break; 4435 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: 4436 if (print) { 4437 _print_next_block((*par_num)++, 4438 "DOORBELLQ"); 4439 _print_parity(bp, 4440 DORQ_REG_DORQ_PRTY_STS); 4441 } 4442 break; 4443 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: 4444 if (print) { 4445 _print_next_block((*par_num)++, "NIG"); 4446 if (CHIP_IS_E1x(bp)) { 4447 _print_parity(bp, 4448 NIG_REG_NIG_PRTY_STS); 4449 } else { 4450 _print_parity(bp, 4451 NIG_REG_NIG_PRTY_STS_0); 4452 _print_parity(bp, 4453 NIG_REG_NIG_PRTY_STS_1); 4454 } 4455 } 4456 break; 4457 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: 4458 if (print) 4459 _print_next_block((*par_num)++, 4460 "VAUX PCI CORE"); 4461 *global = true; 4462 break; 4463 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: 4464 if (print) { 4465 _print_next_block((*par_num)++, 4466 "DEBUG"); 4467 
_print_parity(bp, DBG_REG_DBG_PRTY_STS); 4468 } 4469 break; 4470 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: 4471 if (print) { 4472 _print_next_block((*par_num)++, "USDM"); 4473 _print_parity(bp, 4474 USDM_REG_USDM_PRTY_STS); 4475 } 4476 break; 4477 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: 4478 if (print) { 4479 _print_next_block((*par_num)++, "UCM"); 4480 _print_parity(bp, UCM_REG_UCM_PRTY_STS); 4481 } 4482 break; 4483 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: 4484 if (print) { 4485 _print_next_block((*par_num)++, 4486 "USEMI"); 4487 _print_parity(bp, 4488 USEM_REG_USEM_PRTY_STS_0); 4489 _print_parity(bp, 4490 USEM_REG_USEM_PRTY_STS_1); 4491 } 4492 break; 4493 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: 4494 if (print) { 4495 _print_next_block((*par_num)++, "UPB"); 4496 _print_parity(bp, GRCBASE_UPB + 4497 PB_REG_PB_PRTY_STS); 4498 } 4499 break; 4500 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: 4501 if (print) { 4502 _print_next_block((*par_num)++, "CSDM"); 4503 _print_parity(bp, 4504 CSDM_REG_CSDM_PRTY_STS); 4505 } 4506 break; 4507 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: 4508 if (print) { 4509 _print_next_block((*par_num)++, "CCM"); 4510 _print_parity(bp, CCM_REG_CCM_PRTY_STS); 4511 } 4512 break; 4513 } 4514 4515 /* Clear the bit */ 4516 sig &= ~cur_bit; 4517 } 4518 } 4519 4520 return res; 4521} 4522 4523static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig, 4524 int *par_num, bool print) 4525{ 4526 u32 cur_bit; 4527 bool res; 4528 int i; 4529 4530 res = false; 4531 4532 for (i = 0; sig; i++) { 4533 cur_bit = (0x1UL << i); 4534 if (sig & cur_bit) { 4535 res |= true; /* Each bit is real error! 
*/ 4536 if (print) { 4537 switch (cur_bit) { 4538 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: 4539 _print_next_block((*par_num)++, 4540 "CSEMI"); 4541 _print_parity(bp, 4542 CSEM_REG_CSEM_PRTY_STS_0); 4543 _print_parity(bp, 4544 CSEM_REG_CSEM_PRTY_STS_1); 4545 break; 4546 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: 4547 _print_next_block((*par_num)++, "PXP"); 4548 _print_parity(bp, PXP_REG_PXP_PRTY_STS); 4549 _print_parity(bp, 4550 PXP2_REG_PXP2_PRTY_STS_0); 4551 _print_parity(bp, 4552 PXP2_REG_PXP2_PRTY_STS_1); 4553 break; 4554 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: 4555 _print_next_block((*par_num)++, 4556 "PXPPCICLOCKCLIENT"); 4557 break; 4558 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: 4559 _print_next_block((*par_num)++, "CFC"); 4560 _print_parity(bp, 4561 CFC_REG_CFC_PRTY_STS); 4562 break; 4563 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: 4564 _print_next_block((*par_num)++, "CDU"); 4565 _print_parity(bp, CDU_REG_CDU_PRTY_STS); 4566 break; 4567 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: 4568 _print_next_block((*par_num)++, "DMAE"); 4569 _print_parity(bp, 4570 DMAE_REG_DMAE_PRTY_STS); 4571 break; 4572 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: 4573 _print_next_block((*par_num)++, "IGU"); 4574 if (CHIP_IS_E1x(bp)) 4575 _print_parity(bp, 4576 HC_REG_HC_PRTY_STS); 4577 else 4578 _print_parity(bp, 4579 IGU_REG_IGU_PRTY_STS); 4580 break; 4581 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: 4582 _print_next_block((*par_num)++, "MISC"); 4583 _print_parity(bp, 4584 MISC_REG_MISC_PRTY_STS); 4585 break; 4586 } 4587 } 4588 4589 /* Clear the bit */ 4590 sig &= ~cur_bit; 4591 } 4592 } 4593 4594 return res; 4595} 4596 4597static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig, 4598 int *par_num, bool *global, 4599 bool print) 4600{ 4601 bool res = false; 4602 u32 cur_bit; 4603 int i; 4604 4605 for (i = 0; sig; i++) { 4606 cur_bit = (0x1UL << i); 4607 if (sig & cur_bit) { 4608 switch (cur_bit) { 4609 case 
AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: 4610 if (print) 4611 _print_next_block((*par_num)++, 4612 "MCP ROM"); 4613 *global = true; 4614 res |= true; 4615 break; 4616 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: 4617 if (print) 4618 _print_next_block((*par_num)++, 4619 "MCP UMP RX"); 4620 *global = true; 4621 res |= true; 4622 break; 4623 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: 4624 if (print) 4625 _print_next_block((*par_num)++, 4626 "MCP UMP TX"); 4627 *global = true; 4628 res |= true; 4629 break; 4630 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: 4631 if (print) 4632 _print_next_block((*par_num)++, 4633 "MCP SCPAD"); 4634 /* clear latched SCPAD PATIRY from MCP */ 4635 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 4636 1UL << 10); 4637 break; 4638 } 4639 4640 /* Clear the bit */ 4641 sig &= ~cur_bit; 4642 } 4643 } 4644 4645 return res; 4646} 4647 4648static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig, 4649 int *par_num, bool print) 4650{ 4651 u32 cur_bit; 4652 bool res; 4653 int i; 4654 4655 res = false; 4656 4657 for (i = 0; sig; i++) { 4658 cur_bit = (0x1UL << i); 4659 if (sig & cur_bit) { 4660 res |= true; /* Each bit is real error! 
*/ 4661 if (print) { 4662 switch (cur_bit) { 4663 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: 4664 _print_next_block((*par_num)++, 4665 "PGLUE_B"); 4666 _print_parity(bp, 4667 PGLUE_B_REG_PGLUE_B_PRTY_STS); 4668 break; 4669 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: 4670 _print_next_block((*par_num)++, "ATC"); 4671 _print_parity(bp, 4672 ATC_REG_ATC_PRTY_STS); 4673 break; 4674 } 4675 } 4676 /* Clear the bit */ 4677 sig &= ~cur_bit; 4678 } 4679 } 4680 4681 return res; 4682} 4683 4684static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, 4685 u32 *sig) 4686{ 4687 bool res = false; 4688 4689 if ((sig[0] & HW_PRTY_ASSERT_SET_0) || 4690 (sig[1] & HW_PRTY_ASSERT_SET_1) || 4691 (sig[2] & HW_PRTY_ASSERT_SET_2) || 4692 (sig[3] & HW_PRTY_ASSERT_SET_3) || 4693 (sig[4] & HW_PRTY_ASSERT_SET_4)) { 4694 int par_num = 0; 4695 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n" 4696 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n", 4697 sig[0] & HW_PRTY_ASSERT_SET_0, 4698 sig[1] & HW_PRTY_ASSERT_SET_1, 4699 sig[2] & HW_PRTY_ASSERT_SET_2, 4700 sig[3] & HW_PRTY_ASSERT_SET_3, 4701 sig[4] & HW_PRTY_ASSERT_SET_4); 4702 if (print) 4703 netdev_err(bp->dev, 4704 "Parity errors detected in blocks: "); 4705 res |= bnx2x_check_blocks_with_parity0(bp, 4706 sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print); 4707 res |= bnx2x_check_blocks_with_parity1(bp, 4708 sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print); 4709 res |= bnx2x_check_blocks_with_parity2(bp, 4710 sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print); 4711 res |= bnx2x_check_blocks_with_parity3(bp, 4712 sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print); 4713 res |= bnx2x_check_blocks_with_parity4(bp, 4714 sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print); 4715 4716 if (print) 4717 pr_cont("\n"); 4718 } 4719 4720 return res; 4721} 4722 4723/** 4724 * bnx2x_chk_parity_attn - checks for parity attentions. 
4725 * 4726 * @bp: driver handle 4727 * @global: true if there was a global attention 4728 * @print: show parity attention in syslog 4729 */ 4730bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print) 4731{ 4732 struct attn_route attn = { {0} }; 4733 int port = BP_PORT(bp); 4734 4735 attn.sig[0] = REG_RD(bp, 4736 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + 4737 port*4); 4738 attn.sig[1] = REG_RD(bp, 4739 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + 4740 port*4); 4741 attn.sig[2] = REG_RD(bp, 4742 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + 4743 port*4); 4744 attn.sig[3] = REG_RD(bp, 4745 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + 4746 port*4); 4747 /* Since MCP attentions can't be disabled inside the block, we need to 4748 * read AEU registers to see whether they're currently disabled 4749 */ 4750 attn.sig[3] &= ((REG_RD(bp, 4751 !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 4752 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) & 4753 MISC_AEU_ENABLE_MCP_PRTY_BITS) | 4754 ~MISC_AEU_ENABLE_MCP_PRTY_BITS); 4755 4756 if (!CHIP_IS_E1x(bp)) 4757 attn.sig[4] = REG_RD(bp, 4758 MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + 4759 port*4); 4760 4761 return bnx2x_parity_attn(bp, global, print, attn.sig); 4762} 4763 4764static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) 4765{ 4766 u32 val; 4767 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { 4768 4769 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); 4770 BNX2X_ERR("PGLUE hw attention 0x%x\n", val); 4771 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) 4772 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n"); 4773 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) 4774 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n"); 4775 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) 4776 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n"); 4777 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) 4778 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n"); 4779 if (val & 4780 
PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) 4781 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n"); 4782 if (val & 4783 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) 4784 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n"); 4785 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) 4786 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n"); 4787 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) 4788 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n"); 4789 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW) 4790 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n"); 4791 } 4792 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) { 4793 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR); 4794 BNX2X_ERR("ATC hw attention 0x%x\n", val); 4795 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR) 4796 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n"); 4797 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND) 4798 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n"); 4799 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS) 4800 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n"); 4801 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT) 4802 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n"); 4803 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR) 4804 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n"); 4805 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU) 4806 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n"); 4807 } 4808 4809 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 4810 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) { 4811 BNX2X_ERR("FATAL parity attention set4 0x%x\n", 4812 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | 4813 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); 4814 } 4815} 4816 4817static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) 4818{ 4819 struct attn_route attn, *group_mask; 4820 int port = BP_PORT(bp); 4821 int 
index; 4822 u32 reg_addr; 4823 u32 val; 4824 u32 aeu_mask; 4825 bool global = false; 4826 4827 /* need to take HW lock because MCP or other port might also 4828 try to handle this event */ 4829 bnx2x_acquire_alr(bp); 4830 4831 if (bnx2x_chk_parity_attn(bp, &global, true)) { 4832#ifndef BNX2X_STOP_ON_ERROR 4833 bp->recovery_state = BNX2X_RECOVERY_INIT; 4834 schedule_delayed_work(&bp->sp_rtnl_task, 0); 4835 /* Disable HW interrupts */ 4836 bnx2x_int_disable(bp); 4837 /* In case of parity errors don't handle attentions so that 4838 * other function would "see" parity errors. 4839 */ 4840#else 4841 bnx2x_panic(); 4842#endif 4843 bnx2x_release_alr(bp); 4844 return; 4845 } 4846 4847 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); 4848 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 4849 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 4850 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); 4851 if (!CHIP_IS_E1x(bp)) 4852 attn.sig[4] = 4853 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); 4854 else 4855 attn.sig[4] = 0; 4856 4857 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n", 4858 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]); 4859 4860 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 4861 if (deasserted & (1 << index)) { 4862 group_mask = &bp->attn_group[index]; 4863 4864 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n", 4865 index, 4866 group_mask->sig[0], group_mask->sig[1], 4867 group_mask->sig[2], group_mask->sig[3], 4868 group_mask->sig[4]); 4869 4870 bnx2x_attn_int_deasserted4(bp, 4871 attn.sig[4] & group_mask->sig[4]); 4872 bnx2x_attn_int_deasserted3(bp, 4873 attn.sig[3] & group_mask->sig[3]); 4874 bnx2x_attn_int_deasserted1(bp, 4875 attn.sig[1] & group_mask->sig[1]); 4876 bnx2x_attn_int_deasserted2(bp, 4877 attn.sig[2] & group_mask->sig[2]); 4878 bnx2x_attn_int_deasserted0(bp, 4879 attn.sig[0] & group_mask->sig[0]); 4880 } 4881 
} 4882 4883 bnx2x_release_alr(bp); 4884 4885 if (bp->common.int_block == INT_BLOCK_HC) 4886 reg_addr = (HC_REG_COMMAND_REG + port*32 + 4887 COMMAND_REG_ATTN_BITS_CLR); 4888 else 4889 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8); 4890 4891 val = ~deasserted; 4892 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val, 4893 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); 4894 REG_WR(bp, reg_addr, val); 4895 4896 if (~bp->attn_state & deasserted) 4897 BNX2X_ERR("IGU ERROR\n"); 4898 4899 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 4900 MISC_REG_AEU_MASK_ATTN_FUNC_0; 4901 4902 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 4903 aeu_mask = REG_RD(bp, reg_addr); 4904 4905 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n", 4906 aeu_mask, deasserted); 4907 aeu_mask |= (deasserted & 0x3ff); 4908 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); 4909 4910 REG_WR(bp, reg_addr, aeu_mask); 4911 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); 4912 4913 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); 4914 bp->attn_state &= ~deasserted; 4915 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state); 4916} 4917 4918static void bnx2x_attn_int(struct bnx2x *bp) 4919{ 4920 /* read local copy of bits */ 4921 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block. 4922 attn_bits); 4923 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block. 
4924 attn_bits_ack); 4925 u32 attn_state = bp->attn_state; 4926 4927 /* look for changed bits */ 4928 u32 asserted = attn_bits & ~attn_ack & ~attn_state; 4929 u32 deasserted = ~attn_bits & attn_ack & attn_state; 4930 4931 DP(NETIF_MSG_HW, 4932 "attn_bits %x attn_ack %x asserted %x deasserted %x\n", 4933 attn_bits, attn_ack, asserted, deasserted); 4934 4935 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) 4936 BNX2X_ERR("BAD attention state\n"); 4937 4938 /* handle bits that were raised */ 4939 if (asserted) 4940 bnx2x_attn_int_asserted(bp, asserted); 4941 4942 if (deasserted) 4943 bnx2x_attn_int_deasserted(bp, deasserted); 4944} 4945 4946void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, 4947 u16 index, u8 op, u8 update) 4948{ 4949 u32 igu_addr = bp->igu_base_addr; 4950 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; 4951 bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update, 4952 igu_addr); 4953} 4954 4955static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod) 4956{ 4957 /* No memory barriers */ 4958 storm_memset_eq_prod(bp, prod, BP_FUNC(bp)); 4959 mmiowb(); /* keep prod updates ordered */ 4960} 4961 4962static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, 4963 union event_ring_elem *elem) 4964{ 4965 u8 err = elem->message.error; 4966 4967 if (!bp->cnic_eth_dev.starting_cid || 4968 (cid < bp->cnic_eth_dev.starting_cid && 4969 cid != bp->cnic_eth_dev.iscsi_l2_cid)) 4970 return 1; 4971 4972 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid); 4973 4974 if (unlikely(err)) { 4975 4976 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n", 4977 cid); 4978 bnx2x_panic_dump(bp, false); 4979 } 4980 bnx2x_cnic_cfc_comp(bp, cid, err); 4981 return 0; 4982} 4983 4984static void bnx2x_handle_mcast_eqe(struct bnx2x *bp) 4985{ 4986 struct bnx2x_mcast_ramrod_params rparam; 4987 int rc; 4988 4989 memset(&rparam, 0, sizeof(rparam)); 4990 4991 rparam.mcast_obj = &bp->mcast_obj; 4992 4993 netif_addr_lock_bh(bp->dev); 
4994 4995 /* Clear pending state for the last command */ 4996 bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw); 4997 4998 /* If there are pending mcast commands - send them */ 4999 if (bp->mcast_obj.check_pending(&bp->mcast_obj)) { 5000 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); 5001 if (rc < 0) 5002 BNX2X_ERR("Failed to send pending mcast commands: %d\n", 5003 rc); 5004 } 5005 5006 netif_addr_unlock_bh(bp->dev); 5007} 5008 5009static void bnx2x_handle_classification_eqe(struct bnx2x *bp, 5010 union event_ring_elem *elem) 5011{ 5012 unsigned long ramrod_flags = 0; 5013 int rc = 0; 5014 u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; 5015 struct bnx2x_vlan_mac_obj *vlan_mac_obj; 5016 5017 /* Always push next commands out, don't wait here */ 5018 __set_bit(RAMROD_CONT, &ramrod_flags); 5019 5020 switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo) 5021 >> BNX2X_SWCID_SHIFT) { 5022 case BNX2X_FILTER_MAC_PENDING: 5023 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n"); 5024 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp))) 5025 vlan_mac_obj = &bp->iscsi_l2_mac_obj; 5026 else 5027 vlan_mac_obj = &bp->sp_objs[cid].mac_obj; 5028 5029 break; 5030 case BNX2X_FILTER_MCAST_PENDING: 5031 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n"); 5032 /* This is only relevant for 57710 where multicast MACs are 5033 * configured as unicast MACs using the same ramrod. 
5034 */ 5035 bnx2x_handle_mcast_eqe(bp); 5036 return; 5037 default: 5038 BNX2X_ERR("Unsupported classification command: %d\n", 5039 elem->message.data.eth_event.echo); 5040 return; 5041 } 5042 5043 rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags); 5044 5045 if (rc < 0) 5046 BNX2X_ERR("Failed to schedule new commands: %d\n", rc); 5047 else if (rc > 0) 5048 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n"); 5049} 5050 5051static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start); 5052 5053static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp) 5054{ 5055 netif_addr_lock_bh(bp->dev); 5056 5057 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); 5058 5059 /* Send rx_mode command again if was requested */ 5060 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state)) 5061 bnx2x_set_storm_rx_mode(bp); 5062 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, 5063 &bp->sp_state)) 5064 bnx2x_set_iscsi_eth_rx_mode(bp, true); 5065 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, 5066 &bp->sp_state)) 5067 bnx2x_set_iscsi_eth_rx_mode(bp, false); 5068 5069 netif_addr_unlock_bh(bp->dev); 5070} 5071 5072static void bnx2x_after_afex_vif_lists(struct bnx2x *bp, 5073 union event_ring_elem *elem) 5074{ 5075 if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) { 5076 DP(BNX2X_MSG_SP, 5077 "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n", 5078 elem->message.data.vif_list_event.func_bit_map); 5079 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK, 5080 elem->message.data.vif_list_event.func_bit_map); 5081 } else if (elem->message.data.vif_list_event.echo == 5082 VIF_LIST_RULE_SET) { 5083 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n"); 5084 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0); 5085 } 5086} 5087 5088/* called with rtnl_lock */ 5089static void bnx2x_after_function_update(struct bnx2x *bp) 5090{ 5091 int q, rc; 5092 struct bnx2x_fastpath *fp; 5093 struct 
bnx2x_queue_state_params queue_params = {NULL}; 5094 struct bnx2x_queue_update_params *q_update_params = 5095 &queue_params.params.update; 5096 5097 /* Send Q update command with afex vlan removal values for all Qs */ 5098 queue_params.cmd = BNX2X_Q_CMD_UPDATE; 5099 5100 /* set silent vlan removal values according to vlan mode */ 5101 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, 5102 &q_update_params->update_flags); 5103 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, 5104 &q_update_params->update_flags); 5105 __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags); 5106 5107 /* in access mode mark mask and value are 0 to strip all vlans */ 5108 if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) { 5109 q_update_params->silent_removal_value = 0; 5110 q_update_params->silent_removal_mask = 0; 5111 } else { 5112 q_update_params->silent_removal_value = 5113 (bp->afex_def_vlan_tag & VLAN_VID_MASK); 5114 q_update_params->silent_removal_mask = VLAN_VID_MASK; 5115 } 5116 5117 for_each_eth_queue(bp, q) { 5118 /* Set the appropriate Queue object */ 5119 fp = &bp->fp[q]; 5120 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; 5121 5122 /* send the ramrod */ 5123 rc = bnx2x_queue_state_change(bp, &queue_params); 5124 if (rc < 0) 5125 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n", 5126 q); 5127 } 5128 5129 if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) { 5130 fp = &bp->fp[FCOE_IDX(bp)]; 5131 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; 5132 5133 /* clear pending completion bit */ 5134 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags); 5135 5136 /* mark latest Q bit */ 5137 smp_mb__before_clear_bit(); 5138 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); 5139 smp_mb__after_clear_bit(); 5140 5141 /* send Q update ramrod for FCoE Q */ 5142 rc = bnx2x_queue_state_change(bp, &queue_params); 5143 if (rc < 0) 5144 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n", 5145 q); 5146 } else { 5147 /* If no FCoE ring - ACK MCP now */ 5148 
bnx2x_link_report(bp); 5149 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); 5150 } 5151} 5152 5153static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj( 5154 struct bnx2x *bp, u32 cid) 5155{ 5156 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid); 5157 5158 if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp))) 5159 return &bnx2x_fcoe_sp_obj(bp, q_obj); 5160 else 5161 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj; 5162} 5163 5164static void bnx2x_eq_int(struct bnx2x *bp) 5165{ 5166 u16 hw_cons, sw_cons, sw_prod; 5167 union event_ring_elem *elem; 5168 u8 echo; 5169 u32 cid; 5170 u8 opcode; 5171 int rc, spqe_cnt = 0; 5172 struct bnx2x_queue_sp_obj *q_obj; 5173 struct bnx2x_func_sp_obj *f_obj = &bp->func_obj; 5174 struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw; 5175 5176 hw_cons = le16_to_cpu(*bp->eq_cons_sb); 5177 5178 /* The hw_cos range is 1-255, 257 - the sw_cons range is 0-254, 256. 5179 * when we get the next-page we need to adjust so the loop 5180 * condition below will be met. The next element is the size of a 5181 * regular element and hence incrementing by 1 5182 */ 5183 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) 5184 hw_cons++; 5185 5186 /* This function may never run in parallel with itself for a 5187 * specific bp, thus there is no need in "paired" read memory 5188 * barrier here. 
5189 */ 5190 sw_cons = bp->eq_cons; 5191 sw_prod = bp->eq_prod; 5192 5193 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n", 5194 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left)); 5195 5196 for (; sw_cons != hw_cons; 5197 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { 5198 5199 elem = &bp->eq_ring[EQ_DESC(sw_cons)]; 5200 5201 rc = bnx2x_iov_eq_sp_event(bp, elem); 5202 if (!rc) { 5203 DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n", 5204 rc); 5205 goto next_spqe; 5206 } 5207 5208 /* elem CID originates from FW; actually LE */ 5209 cid = SW_CID((__force __le32) 5210 elem->message.data.cfc_del_event.cid); 5211 opcode = elem->message.opcode; 5212 5213 /* handle eq element */ 5214 switch (opcode) { 5215 case EVENT_RING_OPCODE_VF_PF_CHANNEL: 5216 DP(BNX2X_MSG_IOV, "vf pf channel element on eq\n"); 5217 bnx2x_vf_mbx(bp, &elem->message.data.vf_pf_event); 5218 continue; 5219 5220 case EVENT_RING_OPCODE_STAT_QUERY: 5221 DP(BNX2X_MSG_SP | BNX2X_MSG_STATS, 5222 "got statistics comp event %d\n", 5223 bp->stats_comp++); 5224 /* nothing to do with stats comp */ 5225 goto next_spqe; 5226 5227 case EVENT_RING_OPCODE_CFC_DEL: 5228 /* handle according to cid range */ 5229 /* 5230 * we may want to verify here that the bp state is 5231 * HALTING 5232 */ 5233 DP(BNX2X_MSG_SP, 5234 "got delete ramrod for MULTI[%d]\n", cid); 5235 5236 if (CNIC_LOADED(bp) && 5237 !bnx2x_cnic_handle_cfc_del(bp, cid, elem)) 5238 goto next_spqe; 5239 5240 q_obj = bnx2x_cid_to_q_obj(bp, cid); 5241 5242 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL)) 5243 break; 5244 5245 goto next_spqe; 5246 5247 case EVENT_RING_OPCODE_STOP_TRAFFIC: 5248 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n"); 5249 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); 5250 if (f_obj->complete_cmd(bp, f_obj, 5251 BNX2X_F_CMD_TX_STOP)) 5252 break; 5253 goto next_spqe; 5254 5255 case EVENT_RING_OPCODE_START_TRAFFIC: 5256 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n"); 
5257 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); 5258 if (f_obj->complete_cmd(bp, f_obj, 5259 BNX2X_F_CMD_TX_START)) 5260 break; 5261 goto next_spqe; 5262 5263 case EVENT_RING_OPCODE_FUNCTION_UPDATE: 5264 echo = elem->message.data.function_update_event.echo; 5265 if (echo == SWITCH_UPDATE) { 5266 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, 5267 "got FUNC_SWITCH_UPDATE ramrod\n"); 5268 if (f_obj->complete_cmd( 5269 bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE)) 5270 break; 5271 5272 } else { 5273 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP, 5274 "AFEX: ramrod completed FUNCTION_UPDATE\n"); 5275 f_obj->complete_cmd(bp, f_obj, 5276 BNX2X_F_CMD_AFEX_UPDATE); 5277 5278 /* We will perform the Queues update from 5279 * sp_rtnl task as all Queue SP operations 5280 * should run under rtnl_lock. 5281 */ 5282 smp_mb__before_clear_bit(); 5283 set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, 5284 &bp->sp_rtnl_state); 5285 smp_mb__after_clear_bit(); 5286 5287 schedule_delayed_work(&bp->sp_rtnl_task, 0); 5288 } 5289 5290 goto next_spqe; 5291 5292 case EVENT_RING_OPCODE_AFEX_VIF_LISTS: 5293 f_obj->complete_cmd(bp, f_obj, 5294 BNX2X_F_CMD_AFEX_VIFLISTS); 5295 bnx2x_after_afex_vif_lists(bp, elem); 5296 goto next_spqe; 5297 case EVENT_RING_OPCODE_FUNCTION_START: 5298 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, 5299 "got FUNC_START ramrod\n"); 5300 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START)) 5301 break; 5302 5303 goto next_spqe; 5304 5305 case EVENT_RING_OPCODE_FUNCTION_STOP: 5306 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, 5307 "got FUNC_STOP ramrod\n"); 5308 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP)) 5309 break; 5310 5311 goto next_spqe; 5312 } 5313 5314 switch (opcode | bp->state) { 5315 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | 5316 BNX2X_STATE_OPEN): 5317 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | 5318 BNX2X_STATE_OPENING_WAIT4_PORT): 5319 cid = elem->message.data.eth_event.echo & 5320 BNX2X_SWCID_MASK; 5321 DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. 
							  CID %d\n",
			   cid);
			rss_raw->clear_pending(rss_raw);
			break;

		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
		case (EVENT_RING_OPCODE_SET_MAC |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
		case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
		      BNX2X_STATE_OPEN):
		case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
		      BNX2X_STATE_DIAG):
		case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n");
			bnx2x_handle_classification_eqe(bp, elem);
			break;

		case (EVENT_RING_OPCODE_MULTICAST_RULES |
		      BNX2X_STATE_OPEN):
		case (EVENT_RING_OPCODE_MULTICAST_RULES |
		      BNX2X_STATE_DIAG):
		case (EVENT_RING_OPCODE_MULTICAST_RULES |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(BNX2X_MSG_SP, "got mcast ramrod\n");
			bnx2x_handle_mcast_eqe(bp);
			break;

		case (EVENT_RING_OPCODE_FILTERS_RULES |
		      BNX2X_STATE_OPEN):
		case (EVENT_RING_OPCODE_FILTERS_RULES |
		      BNX2X_STATE_DIAG):
		case (EVENT_RING_OPCODE_FILTERS_RULES |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
			bnx2x_handle_rx_mode_eqe(bp);
			break;
		default:
			/* unknown event log error and continue */
			BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
				  elem->message.opcode, bp->state);
		}
next_spqe:
		spqe_cnt++;
	} /* for */

	/* return the consumed EQ credits before publishing the new
	 * consumer/producer, so spq posting may proceed
	 */
	smp_mb__before_atomic_inc();
	atomic_add(spqe_cnt, &bp->eq_spq_left);

	bp->eq_cons = sw_cons;
	bp->eq_prod = sw_prod;
	/* Make sure that above mem writes were issued towards the memory */
	smp_wmb();

	/* update producer */
	bnx2x_update_eq_prod(bp, bp->eq_prod);
}

/* Slowpath work handler: processes default-status-block work deferred by
 * bnx2x_schedule_sp_task() — HW attentions, FCoE L2 SB activity and event
 * queue (EQ) completions — then re-enables the IGU interrupt via
 * bnx2x_ack_sb().
 */
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);

	DP(BNX2X_MSG_SP, "sp task invoked\n");

	/* make sure the atomic interrupt_occurred has been written */
	smp_rmb();
	if (atomic_read(&bp->interrupt_occurred)) {

		/* what work needs to be performed? */
		u16 status = bnx2x_update_dsb_idx(bp);

		DP(BNX2X_MSG_SP, "status %x\n", status);
		DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
		atomic_set(&bp->interrupt_occurred, 0);

		/* HW attentions */
		if (status & BNX2X_DEF_SB_ATT_IDX) {
			bnx2x_attn_int(bp);
			status &= ~BNX2X_DEF_SB_ATT_IDX;
		}

		/* SP events: STAT_QUERY and others */
		if (status & BNX2X_DEF_SB_IDX) {
			struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);

			if (FCOE_INIT(bp) &&
			    (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				/* Prevent local bottom-halves from running as
				 * we are going to change the local NAPI list.
				 */
				local_bh_disable();
				napi_schedule(&bnx2x_fcoe(bp, napi));
				local_bh_enable();
			}

			/* Handle EQ completions */
			bnx2x_eq_int(bp);
			bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
				     le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);

			status &= ~BNX2X_DEF_SB_IDX;
		}

		/* if status is non zero then perhaps something went wrong */
		if (unlikely(status))
			DP(BNX2X_MSG_SP,
			   "got an unknown interrupt! (status 0x%x)\n", status);

		/* ack status block only if something was actually handled */
		bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
			     le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
	}

	/* must be called after the EQ processing (since eq leads to sriov
	 * ramrod completion flows).
	 * This flow may have been scheduled by the arrival of a ramrod
	 * completion, or by the sriov code rescheduling itself.
	 */
	bnx2x_iov_sp_task(bp);

	/* afex - poll to check if VIFSET_ACK should be sent to MFW */
	if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
			       &bp->sp_state)) {
		bnx2x_link_report(bp);
		bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
	}
}

/* MSI-X slowpath interrupt handler: masks further slowpath interrupts,
 * gives CNIC a chance to look at the status block, and defers the real
 * work to the sp task.
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
		     IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	if (CNIC_LOADED(bp)) {
		struct cnic_ops *c_ops;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}

	/* schedule sp task to perform default status block work, ack
	 * attentions and enable interrupts.
	 */
	bnx2x_schedule_sp_task(bp);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Write the current driver pulse sequence into shared memory so the
 * management firmware can see the driver is alive.
 */
void bnx2x_drv_pulse(struct bnx2x *bp)
{
	SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
		 bp->fw_drv_pulse_wr_seq);
}

/* Periodic timer: advances the driver/MCP heartbeat (PF only, when an MCP
 * is present), kicks a statistics update while the device is open, samples
 * the PF->VF bulletin board on VFs, and re-arms itself.
 */
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (IS_PF(bp) &&
	    !BP_NOMCP(bp)) {
		int mb_idx = BP_FW_MB_IDX(bp);
		u16 drv_pulse;
		u16 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		bnx2x_drv_pulse(bp);

		mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should not get too big. If the MFW is more than 5 pulses
		 * behind, we should worry about it enough to generate an error
		 * log.
		 */
		if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
			BNX2X_ERR("MFW seems hanged: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

	/* sample pf vf bulletin board for new posts from pf */
	if (IS_VF(bp))
		bnx2x_timer_sriov(bp);

	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

/* Fill len bytes of device memory at addr with the byte/dword value fill;
 * uses dword writes when both addr and len are dword-aligned, byte writes
 * otherwise.
 */
static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
{
	u32 i;
	if (!(len%4) && !(addr%4))
		for (i = 0; i < len; i += 4)
			REG_WR(bp, addr + i, fill);
	else
		for (i = 0; i < len; i++)
			REG_WR8(bp, addr + i, fill);
}

/* helper: writes FP SP data to FW - data_size in dwords */
static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
				int fw_sb_id,
				u32 *sb_data_p,
				u32 data_size)
{
	int index;
	for (index = 0; index < data_size; index++)
		REG_WR(bp, BAR_CSTRORM_INTMEM +
		       CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
		       sizeof(u32)*index,
		       *(sb_data_p + index));
}

/* Disable a fastpath status block in FW (state = SB_DISABLED) and zero its
 * CSTORM status/sync areas. Uses the E2 or E1x data layout per chip family.
 */
static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
{
	u32 *sb_data_p;
	u32 data_size = 0;
	struct hc_status_block_data_e2 sb_data_e2;
	struct hc_status_block_data_e1x sb_data_e1x;

	/* disable the function first */
	if (!CHIP_IS_E1x(bp)) {
		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
		sb_data_e2.common.state = SB_DISABLED;
		sb_data_e2.common.p_func.vf_valid = false;
		sb_data_p = (u32 *)&sb_data_e2;
		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.state = SB_DISABLED;
		sb_data_e1x.common.p_func.vf_valid = false;
		sb_data_p = (u32 *)&sb_data_e1x;
		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
	}
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);

	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
		   CSTORM_STATUS_BLOCK_SIZE);
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
		   CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
		   CSTORM_SYNC_BLOCK_SIZE);
}

/* helper: writes SP SB data to FW */
static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
				struct hc_sp_status_block_data *sp_sb_data)
{
	int func = BP_FUNC(bp);
	int i;
	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		REG_WR(bp, BAR_CSTRORM_INTMEM +
		       CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
		       i*sizeof(u32),
		       *((u32 *)sp_sb_data + i));
}

/* Disable the slowpath status block of this function in FW and zero its
 * CSTORM status/sync areas.
 */
static void bnx2x_zero_sp_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	struct hc_sp_status_block_data sp_sb_data;
	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	sp_sb_data.state = SB_DISABLED;
	sp_sb_data.p_func.vf_valid = false;

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
		   CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
		   CSTORM_SP_STATUS_BLOCK_SIZE);
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
		   CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
		   CSTORM_SP_SYNC_BLOCK_SIZE);
}

/* Bind a non-default SB state machine to its IGU SB/segment; the 0xFF /
 * 0xFFFFFFFF timer values presumably park the SM timer — TODO confirm
 * against the HSI definitions.
 */
static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
					   int igu_sb_id, int igu_seg_id)
{
	hc_sm->igu_sb_id = igu_sb_id;
	hc_sm->igu_seg_id = igu_seg_id;
	hc_sm->timer_value = 0xFF;
	hc_sm->time_to_expire = 0xFFFFFFFF;
}

/* allocates state machine ids.
 */
static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
{
	/* zero out state machine indices */
	/* rx indices */
	index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;

	/* tx indices */
	index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;

	/* map indices */
	/* rx indices */
	index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
		SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;

	/* tx indices */
	index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
	index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
		SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
}

/* Enable and configure a fastpath status block in FW: host SB DMA address,
 * owning PF/VF ids, RX/TX state-machine mapping — then write the whole
 * data structure to the CSTORM.
 */
void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
		   u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
	int igu_seg_id;

	struct hc_status_block_data_e2 sb_data_e2;
	struct hc_status_block_data_e1x sb_data_e1x;
	struct hc_status_block_sm *hc_sm_p;
	int data_size;
	u32 *sb_data_p;

	if (CHIP_INT_MODE_IS_BC(bp))
		igu_seg_id = HC_SEG_ACCESS_NORM;
	else
		igu_seg_id = IGU_SEG_ACCESS_NORM;

	/* disable + zero first, then re-program as enabled */
	bnx2x_zero_fp_sb(bp, fw_sb_id);

	if (!CHIP_IS_E1x(bp)) {
		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
		sb_data_e2.common.state = SB_ENABLED;
		sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e2.common.p_func.vf_id = vfid;
		sb_data_e2.common.p_func.vf_valid = vf_valid;
		sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e2.common.same_igu_sb_1b = true;
		sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e2.common.state_machine;
		sb_data_p = (u32 *)&sb_data_e2;
		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
		bnx2x_map_sb_state_machines(sb_data_e2.index_data);
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.state = SB_ENABLED;
		sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e1x.common.p_func.vf_id = 0xff;
		sb_data_e1x.common.p_func.vf_valid = false;
		sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e1x.common.same_igu_sb_1b = true;
		sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e1x.common.state_machine;
		sb_data_p = (u32 *)&sb_data_e1x;
		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
		bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
	}

	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
				       igu_sb_id, igu_seg_id);
	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
				       igu_sb_id, igu_seg_id);

	DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);

	/* write indices to HW - PCI guarantees endianity of regpairs */
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
}

/* Program interrupt coalescing timeouts (in usec) for the RX index and the
 * three per-CoS TX indices of one fastpath SB.
 */
static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
				     u16 tx_usec, u16 rx_usec)
{
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
				       false, rx_usec);
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
				       HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
				       tx_usec);
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
				       HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
				       tx_usec);
	bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
				       HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
				       tx_usec);
}

/* Initialize the default (slowpath) status block: read the attention group
 * signature masks from the AEU, program the attention message address in
 * HC/IGU, write the SP SB data to FW and enable the dsb interrupt.
 */
static void bnx2x_init_def_sb(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	dma_addr_t mapping = bp->def_status_blk_mapping;
	int igu_sp_sb_index;
	int igu_seg_id;
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int reg_offset, reg_offset_en5;
	u64 section;
	int index;
	struct hc_sp_status_block_data sp_sb_data;
	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	if (CHIP_INT_MODE_IS_BC(bp)) {
		igu_sp_sb_index = DEF_SB_IGU_ID;
		igu_seg_id = HC_SEG_ACCESS_DEF;
	} else {
		igu_sp_sb_index = bp->igu_dsb_id;
		igu_seg_id = IGU_SEG_ACCESS_DEF;
	}

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = igu_sp_sb_index;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
	reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
				 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		int sindex;
		/* take care of sig[0]..sig[4] */
		for (sindex = 0; sindex < 4; sindex++)
			bp->attn_group[index].sig[sindex] =
				REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);

		if (!CHIP_IS_E1x(bp))
			/*
			 * enable5 is separate from the rest of the registers,
			 * and therefore the address skip is 4
			 * and not 16 between the different groups
			 */
			bp->attn_group[index].sig[4] = REG_RD(bp,
					reg_offset_en5 + 0x4*index);
		else
			bp->attn_group[index].sig[4] = 0;
	}

	if (bp->common.int_block == INT_BLOCK_HC) {
		reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
				     HC_REG_ATTN_MSG0_ADDR_L);

		REG_WR(bp, reg_offset, U64_LO(section));
		REG_WR(bp, reg_offset + 4, U64_HI(section));
	} else if (!CHIP_IS_E1x(bp)) {
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
	}

	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    sp_sb);

	bnx2x_zero_sp_sb(bp);

	/* PCI guarantees endianity of regpairs */
	sp_sb_data.state = SB_ENABLED;
	sp_sb_data.host_sb_addr.lo = U64_LO(section);
	sp_sb_data.host_sb_addr.hi = U64_HI(section);
	sp_sb_data.igu_sb_id = igu_sp_sb_index;
	sp_sb_data.igu_seg_id = igu_seg_id;
	sp_sb_data.p_func.pf_id = func;
	sp_sb_data.p_func.vnic_id = BP_VN(bp);
	sp_sb_data.p_func.vf_id = 0xff;

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}

/* Re-program the configured tx/rx coalescing values on every eth queue. */
void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i)
		bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
					 bp->tx_ticks, bp->rx_ticks);
}

/* Reset the slowpath queue (SPQ) ring bookkeeping and its credit count. */
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	spin_lock_init(&bp->spq_lock);
	atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);

	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
}

/* Chain the event queue pages into a ring (last element of each page points
 * to the next page) and reset the EQ consumer/producer.
 */
static void bnx2x_init_eq_ring(struct bnx2x *bp)
{
	int i;
	for (i = 1; i <= NUM_EQ_PAGES; i++) {
		union event_ring_elem *elem =
			&bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];

		elem->next_page.addr.hi =
			cpu_to_le32(U64_HI(bp->eq_mapping +
					   BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
		elem->next_page.addr.lo =
			cpu_to_le32(U64_LO(bp->eq_mapping +
					   BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
	}
	bp->eq_cons = 0;
	bp->eq_prod = NUM_EQ_DESC;
	bp->eq_cons_sb = BNX2X_EQ_INDEX;
	/* we want a warning message before it gets wrought... */
	atomic_set(&bp->eq_spq_left,
		   min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
}

/* called with netif_addr_lock_bh() */
/* Send a rx_mode configuration ramrod for one queue (client) and, depending
 * on ramrod_flags, wait for its completion. Returns 0 or a negative errno
 * from bnx2x_config_rx_mode().
 */
static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
			       unsigned long rx_mode_flags,
			       unsigned long rx_accept_flags,
			       unsigned long tx_accept_flags,
			       unsigned long ramrod_flags)
{
	struct bnx2x_rx_mode_ramrod_params ramrod_param;
	int rc;

	memset(&ramrod_param, 0, sizeof(ramrod_param));

	/* Prepare ramrod parameters */
	ramrod_param.cid = 0;
	ramrod_param.cl_id = cl_id;
	ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
	ramrod_param.func_id = BP_FUNC(bp);

	ramrod_param.pstate = &bp->sp_state;
	ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;

	ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
	ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);

	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);

	ramrod_param.ramrod_flags = ramrod_flags;
	ramrod_param.rx_mode_flags = rx_mode_flags;

	ramrod_param.rx_accept_flags = rx_accept_flags;
	ramrod_param.tx_accept_flags = tx_accept_flags;

	rc = bnx2x_config_rx_mode(bp, &ramrod_param);
	if (rc < 0) {
		BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
		return rc;
	}

	return 0;
}

/* Translate a BNX2X_RX_MODE_* value into RX and TX (internal switching)
 * accept-flag bitmasks. Returns 0, or -EINVAL for an unknown mode.
 */
static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
				   unsigned long *rx_accept_flags,
				   unsigned long *tx_accept_flags)
{
	/* Clear the flags first */
	*rx_accept_flags = 0;
	*tx_accept_flags = 0;

	switch (rx_mode) {
	case BNX2X_RX_MODE_NONE:
		/*
		 * 'drop all' supersedes any accept flags that may have been
		 * passed to the function.
		 */
		break;
	case BNX2X_RX_MODE_NORMAL:
		__set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);

		/* internal switching mode */
		__set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
		__set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);

		break;
	case BNX2X_RX_MODE_ALLMULTI:
		__set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);

		/* internal switching mode */
		__set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);

		break;
	case BNX2X_RX_MODE_PROMISC:
		/* According to definition of SI mode, iface in promisc mode
		 * should receive matched and unmatched (in resolution of port)
		 * unicast packets.
		 */
		__set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);

		/* internal switching mode */
		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);

		if (IS_MF_SI(bp))
			__set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
		else
			__set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);

		break;
	default:
		BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
		return -EINVAL;
	}

	/* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
	if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
		__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
		__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
	}

	return 0;
}

/* called with netif_addr_lock_bh() */
/* Apply bp->rx_mode to the leading (and, when present, FCoE) queue by
 * building the accept flags and issuing the rx_mode ramrod.
 */
static int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	unsigned long rx_mode_flags = 0, ramrod_flags = 0;
	unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
	int rc;

	if (!NO_FCOE(bp))
		/* Configure rx_mode of FCoE Queue */
		__set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);

	rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
				     &tx_accept_flags);
	if (rc)
		return rc;

	__set_bit(RAMROD_RX, &ramrod_flags);
	__set_bit(RAMROD_TX, &ramrod_flags);

	return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
				   rx_accept_flags, tx_accept_flags,
				   ramrod_flags);
}

/* One-time (per chip) internal memory initialization done by the function
 * that loaded COMMON: TSTORM classification-failure policy, USTORM agg data
 * area and the IGU mode byte.
 */
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (IS_MF_SI(bp))
		/*
		 * In switch independent mode, the TSTORM needs to accept
		 * packets that failed classification, since approximate match
		 * mac addresses aren't written to NIG LLH
		 */
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
	else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
	if (!CHIP_IS_E1x(bp)) {
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
			CHIP_INT_MODE_IS_BC(bp) ?
			HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
	}
}

/* Dispatch internal memory init according to the load level granted by the
 * MCP; note the deliberate fall-through from COMMON down to FUNCTION.
 */
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		/* nothing to do */
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		/* internal memory per function is
		   initialized inside bnx2x_pf_init */
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

/* IGU SB id of a fastpath queue (offset by CNIC's reserved SB). */
static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
{
	return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
}

/* FW SB id of a fastpath queue (offset by CNIC's reserved SB). */
static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
{
	return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
}

/* Client id of a fastpath queue; chip-family dependent. */
static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
{
	if (CHIP_IS_E1x(fp->bp))
		return BP_L_ID(fp->bp) + fp->index;
	else /* We want Client ID to be the same as IGU SB ID for 57712 */
		return bnx2x_fp_igu_sb_id(fp);
}

/* Initialize one ethernet fastpath: ids, SB shortcuts, per-CoS tx data and
 * (PF only) its FW status block and queue/classification objects.
 */
static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
{
	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
	u8 cos;
	unsigned long q_type = 0;
	u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
	fp->rx_queue = fp_idx;
	fp->cid = fp_idx;
	fp->cl_id = bnx2x_fp_cl_id(fp);
	fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
	fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
	/* qZone id equals to FW (per path) client id */
	fp->cl_qzone_id = bnx2x_fp_qzone_id(fp);

	/* init shortcut */
	fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);

	/* Setup SB indices */
	fp->rx_cons_sb = BNX2X_RX_SB_INDEX;

	/* Configure Queue State object */
	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

	BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);

	/* init tx data */
	for_each_cos_in_tx_queue(fp, cos) {
		bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
				  CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
				  FP_COS_TO_TXQ(fp, cos, bp),
				  BNX2X_TX_SB_INDEX_BASE + cos, fp);
		cids[cos] = fp->txdata_ptr[cos]->cid;
	}

	/* nothing more for vf to do here */
	if (IS_VF(bp))
		return;

	bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
		      fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_update_fpsb_idx(fp);
	bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
			     fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
			     bnx2x_sp_mapping(bp, q_rdata), q_type);

	/**
	 * Configure classification DBs: Always enable Tx switching
	 */
	bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);

	DP(NETIF_MSG_IFUP,
	   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
	   fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
	   fp->igu_sb_id);
}

/* Chain the TX BD pages into a ring via the last (next_page) BD of each
 * page and reset all producer/consumer bookkeeping for one txdata.
 */
static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
{
	int i;

	for (i = 1; i <= NUM_TX_RINGS; i++) {
		struct eth_tx_next_bd *tx_next_bd =
			&txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

		tx_next_bd->addr_hi =
			cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		tx_next_bd->addr_lo
			=
			cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
	}

	*txdata->tx_cons_sb = cpu_to_le16(0);

	SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
	txdata->tx_db.data.zero_fill1 = 0;
	txdata->tx_db.data.prod = 0;

	txdata->tx_pkt_prod = 0;
	txdata->tx_pkt_cons = 0;
	txdata->tx_bd_prod = 0;
	txdata->tx_bd_cons = 0;
	txdata->tx_pkt = 0;
}

/* Initialize the tx rings of all CNIC queues. */
static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue_cnic(bp, i)
		bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
}

/* Initialize the tx rings of all ethernet queues, all CoS. */
static void bnx2x_init_tx_rings(struct bnx2x *bp)
{
	int i;
	u8 cos;

	for_each_eth_queue(bp, i)
		for_each_cos_in_tx_queue(&bp->fp[i], cos)
			bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
}

/* Initialize the FCoE L2 fastpath: ids, tx data, SB shortcuts and its
 * queue state object (single CoS, uses the default SB).
 */
static void bnx2x_init_fcoe_fp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
	unsigned long q_type = 0;

	bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
	bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
						     BNX2X_FCOE_ETH_CL_ID_IDX);
	bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
	bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
	bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
	bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
	bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
			  fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
			  fp);

	DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);

	/* qZone id equals to FW (per path) client id */
	bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
	/* init shortcut */
	bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
		bnx2x_rx_ustorm_prods_offset(fp);

	/* Configure Queue State object */
	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

	/* No multi-CoS for FCoE L2 client */
	BUG_ON(fp->max_cos != 1);

	bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
			     &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
			     bnx2x_sp_mapping(bp, q_rdata), q_type);

	DP(NETIF_MSG_IFUP,
	   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
	   fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
	   fp->igu_sb_id);
}

/* Initialize the CNIC-related fastpaths (FCoE when enabled), the CNIC
 * status block and the CNIC rx/tx rings.
 */
void bnx2x_nic_init_cnic(struct bnx2x *bp)
{
	if (!NO_FCOE(bp))
		bnx2x_init_fcoe_fp(bp);

	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
		      BNX2X_VF_ID_INVALID, false,
		      bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));

	/* ensure status block indices were read */
	rmb();
	bnx2x_init_rx_rings_cnic(bp);
	bnx2x_init_tx_rings_cnic(bp);

	/* flush all */
	mb();
	mmiowb();
}

/* NIC init work that must happen before interrupts are requested: fastpath
 * setup, rx/tx rings, and (PF only) MOD_ABS, default SB and SP ring.
 */
void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
{
	int i;

	/* Setup NIC internals and enable interrupts */
	for_each_eth_queue(bp, i)
		bnx2x_init_eth_fp(bp, i);

	/* ensure status block indices were read */
	rmb();
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_rings(bp);

	if (IS_PF(bp)) {
		/* Initialize MOD_ABS interrupts */
		bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
				       bp->common.shmem_base,
				       bp->common.shmem2_base, BP_PORT(bp));

		/* initialize the default status block and sp ring */
		bnx2x_init_def_sb(bp);
		bnx2x_update_dsb_idx(bp);
		bnx2x_init_sp_ring(bp);
	} else {
		bnx2x_memset_stats(bp);
	}
}

/* NIC init work that runs after interrupts are available: EQ ring, internal
 * memories per load_code, PF/stats init, then interrupt enable and an SPIO5
 * attention check.
 */
void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
{
	bnx2x_init_eq_ring(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_pf_init(bp);
	bnx2x_stats_init(bp);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* gzip service functions */

/* Allocate the firmware decompression resources (DMA output buffer, zlib
 * stream and workspace) using goto-based cleanup on failure.
 */
static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
					    &bp->gunzip_mapping, GFP_KERNEL);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
			  bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n");
	return -ENOMEM;
}

/* Release the decompression resources allocated by bnx2x_gunzip_init(). */
static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	if (bp->strm) {
		vfree(bp->strm->workspace);
		kfree(bp->strm);
		bp->strm = NULL;
	}

	if (bp->gunzip_buf) {
		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
				  bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

/* Inflate a gzip-wrapped firmware blob of len bytes into bp->gunzip_buf;
 * sets bp->gunzip_outlen (in dwords). Returns 0 or a zlib error code.
 */
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME	0x8

	/* skip the optional original-file-name field.
	 * NOTE(review): the bound is tested after the read (zbuf[n++] before
	 * n < len) — assumes a well-formed header terminated within len;
	 * confirm for len <= 10 inputs.
	 */
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	/* raw deflate (negative window bits): the gzip wrapper was skipped
	 * manually above
	 */
	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK)
&& (rc != Z_STREAM_END)) 6356 netdev_err(bp->dev, "Firmware decompression error: %s\n", 6357 bp->strm->msg); 6358 6359 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out); 6360 if (bp->gunzip_outlen & 0x3) 6361 netdev_err(bp->dev, 6362 "Firmware decompression error: gunzip_outlen (%d) not aligned\n", 6363 bp->gunzip_outlen); 6364 bp->gunzip_outlen >>= 2; 6365 6366 zlib_inflateEnd(bp->strm); 6367 6368 if (rc == Z_STREAM_END) 6369 return 0; 6370 6371 return rc; 6372} 6373 6374/* nic load/unload */ 6375 6376/* 6377 * General service functions 6378 */ 6379 6380/* send a NIG loopback debug packet */ 6381static void bnx2x_lb_pckt(struct bnx2x *bp) 6382{ 6383 u32 wb_write[3]; 6384 6385 /* Ethernet source and destination addresses */ 6386 wb_write[0] = 0x55555555; 6387 wb_write[1] = 0x55555555; 6388 wb_write[2] = 0x20; /* SOP */ 6389 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); 6390 6391 /* NON-IP protocol */ 6392 wb_write[0] = 0x09000000; 6393 wb_write[1] = 0x55555555; 6394 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */ 6395 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); 6396} 6397 6398/* some of the internal memories 6399 * are not directly readable from the driver 6400 * to test them we send debug packets 6401 */ 6402static int bnx2x_int_mem_test(struct bnx2x *bp) 6403{ 6404 int factor; 6405 int count, i; 6406 u32 val = 0; 6407 6408 if (CHIP_REV_IS_FPGA(bp)) 6409 factor = 120; 6410 else if (CHIP_REV_IS_EMUL(bp)) 6411 factor = 200; 6412 else 6413 factor = 1; 6414 6415 /* Disable inputs of parser neighbor blocks */ 6416 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); 6417 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); 6418 REG_WR(bp, CFC_REG_DEBUG0, 0x1); 6419 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0); 6420 6421 /* Write 0 to parser credits for CFC search request */ 6422 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 6423 6424 /* send Ethernet packet */ 6425 bnx2x_lb_pckt(bp); 6426 6427 /* TODO do i reset NIG statistic? 
*/ 6428 /* Wait until NIG register shows 1 packet of size 0x10 */ 6429 count = 1000 * factor; 6430 while (count) { 6431 6432 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); 6433 val = *bnx2x_sp(bp, wb_data[0]); 6434 if (val == 0x10) 6435 break; 6436 6437 usleep_range(10000, 20000); 6438 count--; 6439 } 6440 if (val != 0x10) { 6441 BNX2X_ERR("NIG timeout val = 0x%x\n", val); 6442 return -1; 6443 } 6444 6445 /* Wait until PRS register shows 1 packet */ 6446 count = 1000 * factor; 6447 while (count) { 6448 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); 6449 if (val == 1) 6450 break; 6451 6452 usleep_range(10000, 20000); 6453 count--; 6454 } 6455 if (val != 0x1) { 6456 BNX2X_ERR("PRS timeout val = 0x%x\n", val); 6457 return -2; 6458 } 6459 6460 /* Reset and init BRB, PRS */ 6461 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); 6462 msleep(50); 6463 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); 6464 msleep(50); 6465 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); 6466 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); 6467 6468 DP(NETIF_MSG_HW, "part2\n"); 6469 6470 /* Disable inputs of parser neighbor blocks */ 6471 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); 6472 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); 6473 REG_WR(bp, CFC_REG_DEBUG0, 0x1); 6474 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0); 6475 6476 /* Write 0 to parser credits for CFC search request */ 6477 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 6478 6479 /* send 10 Ethernet packets */ 6480 for (i = 0; i < 10; i++) 6481 bnx2x_lb_pckt(bp); 6482 6483 /* Wait until NIG register shows 10 + 1 6484 packets of size 11*0x10 = 0xb0 */ 6485 count = 1000 * factor; 6486 while (count) { 6487 6488 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); 6489 val = *bnx2x_sp(bp, wb_data[0]); 6490 if (val == 0xb0) 6491 break; 6492 6493 usleep_range(10000, 20000); 6494 count--; 6495 } 6496 if (val != 0xb0) { 6497 BNX2X_ERR("NIG timeout val = 0x%x\n", val); 6498 return -3; 6499 } 6500 6501 /* Wait until PRS register 
shows 2 packets */ 6502 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); 6503 if (val != 2) 6504 BNX2X_ERR("PRS timeout val = 0x%x\n", val); 6505 6506 /* Write 1 to parser credits for CFC search request */ 6507 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1); 6508 6509 /* Wait until PRS register shows 3 packets */ 6510 msleep(10 * factor); 6511 /* Wait until NIG register shows 1 packet of size 0x10 */ 6512 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS); 6513 if (val != 3) 6514 BNX2X_ERR("PRS timeout val = 0x%x\n", val); 6515 6516 /* clear NIG EOP FIFO */ 6517 for (i = 0; i < 11; i++) 6518 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO); 6519 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY); 6520 if (val != 1) { 6521 BNX2X_ERR("clear of NIG failed\n"); 6522 return -4; 6523 } 6524 6525 /* Reset and init BRB, PRS, NIG */ 6526 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); 6527 msleep(50); 6528 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); 6529 msleep(50); 6530 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); 6531 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); 6532 if (!CNIC_SUPPORT(bp)) 6533 /* set NIC mode */ 6534 REG_WR(bp, PRS_REG_NIC_MODE, 1); 6535 6536 /* Enable inputs of parser neighbor blocks */ 6537 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff); 6538 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1); 6539 REG_WR(bp, CFC_REG_DEBUG0, 0x0); 6540 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1); 6541 6542 DP(NETIF_MSG_HW, "done\n"); 6543 6544 return 0; /* OK */ 6545} 6546 6547static void bnx2x_enable_blocks_attention(struct bnx2x *bp) 6548{ 6549 u32 val; 6550 6551 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); 6552 if (!CHIP_IS_E1x(bp)) 6553 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40); 6554 else 6555 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0); 6556 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); 6557 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); 6558 /* 6559 * mask read length error interrupts in brb for parser 6560 * (parsing unit and 'checksum and crc' unit) 6561 * these errors are legal (PU reads fixed 
length and CAC can cause 6562 * read length error on truncated packets) 6563 */ 6564 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00); 6565 REG_WR(bp, QM_REG_QM_INT_MASK, 0); 6566 REG_WR(bp, TM_REG_TM_INT_MASK, 0); 6567 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0); 6568 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0); 6569 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0); 6570/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */ 6571/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */ 6572 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0); 6573 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0); 6574 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0); 6575/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */ 6576/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */ 6577 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); 6578 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0); 6579 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0); 6580 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0); 6581/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */ 6582/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */ 6583 6584 val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT | 6585 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF | 6586 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN; 6587 if (!CHIP_IS_E1x(bp)) 6588 val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED | 6589 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED; 6590 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val); 6591 6592 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0); 6593 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0); 6594 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0); 6595/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */ 6596 6597 if (!CHIP_IS_E1x(bp)) 6598 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */ 6599 REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); 6600 6601 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); 6602 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); 6603/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ 6604 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ 6605} 6606 6607static void bnx2x_reset_common(struct bnx2x *bp) 6608{ 6609 u32 val = 0x1400; 6610 6611 /* reset_common */ 6612 REG_WR(bp, GRCBASE_MISC + 
MISC_REGISTERS_RESET_REG_1_CLEAR, 6613 0xd3ffff7f); 6614 6615 if (CHIP_IS_E3(bp)) { 6616 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 6617 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 6618 } 6619 6620 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val); 6621} 6622 6623static void bnx2x_setup_dmae(struct bnx2x *bp) 6624{ 6625 bp->dmae_ready = 0; 6626 spin_lock_init(&bp->dmae_lock); 6627} 6628 6629static void bnx2x_init_pxp(struct bnx2x *bp) 6630{ 6631 u16 devctl; 6632 int r_order, w_order; 6633 6634 pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl); 6635 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl); 6636 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); 6637 if (bp->mrrs == -1) 6638 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12); 6639 else { 6640 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs); 6641 r_order = bp->mrrs; 6642 } 6643 6644 bnx2x_init_pxp_arb(bp, r_order, w_order); 6645} 6646 6647static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp) 6648{ 6649 int is_required; 6650 u32 val; 6651 int port; 6652 6653 if (BP_NOMCP(bp)) 6654 return; 6655 6656 is_required = 0; 6657 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) & 6658 SHARED_HW_CFG_FAN_FAILURE_MASK; 6659 6660 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) 6661 is_required = 1; 6662 6663 /* 6664 * The fan failure mechanism is usually related to the PHY type since 6665 * the power consumption of the board is affected by the PHY. Currently, 6666 * fan is required for most designs with SFX7101, BCM8727 and BCM8481. 
6667 */ 6668 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) 6669 for (port = PORT_0; port < PORT_MAX; port++) { 6670 is_required |= 6671 bnx2x_fan_failure_det_req( 6672 bp, 6673 bp->common.shmem_base, 6674 bp->common.shmem2_base, 6675 port); 6676 } 6677 6678 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required); 6679 6680 if (is_required == 0) 6681 return; 6682 6683 /* Fan failure is indicated by SPIO 5 */ 6684 bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z); 6685 6686 /* set to active low mode */ 6687 val = REG_RD(bp, MISC_REG_SPIO_INT); 6688 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS); 6689 REG_WR(bp, MISC_REG_SPIO_INT, val); 6690 6691 /* enable interrupt to signal the IGU */ 6692 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); 6693 val |= MISC_SPIO_SPIO5; 6694 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val); 6695} 6696 6697void bnx2x_pf_disable(struct bnx2x *bp) 6698{ 6699 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); 6700 val &= ~IGU_PF_CONF_FUNC_EN; 6701 6702 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); 6703 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 6704 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0); 6705} 6706 6707static void bnx2x__common_init_phy(struct bnx2x *bp) 6708{ 6709 u32 shmem_base[2], shmem2_base[2]; 6710 /* Avoid common init in case MFW supports LFA */ 6711 if (SHMEM2_RD(bp, size) > 6712 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)])) 6713 return; 6714 shmem_base[0] = bp->common.shmem_base; 6715 shmem2_base[0] = bp->common.shmem2_base; 6716 if (!CHIP_IS_E1x(bp)) { 6717 shmem_base[1] = 6718 SHMEM2_RD(bp, other_shmem_base_addr); 6719 shmem2_base[1] = 6720 SHMEM2_RD(bp, other_shmem2_base_addr); 6721 } 6722 bnx2x_acquire_phy_lock(bp); 6723 bnx2x_common_init_phy(bp, shmem_base, shmem2_base, 6724 bp->common.chip_id); 6725 bnx2x_release_phy_lock(bp); 6726} 6727 6728/** 6729 * bnx2x_init_hw_common - initialize the HW at the COMMON phase. 
6730 * 6731 * @bp: driver handle 6732 */ 6733static int bnx2x_init_hw_common(struct bnx2x *bp) 6734{ 6735 u32 val; 6736 6737 DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp)); 6738 6739 /* 6740 * take the RESET lock to protect undi_unload flow from accessing 6741 * registers while we're resetting the chip 6742 */ 6743 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 6744 6745 bnx2x_reset_common(bp); 6746 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); 6747 6748 val = 0xfffc; 6749 if (CHIP_IS_E3(bp)) { 6750 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; 6751 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; 6752 } 6753 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); 6754 6755 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 6756 6757 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); 6758 6759 if (!CHIP_IS_E1x(bp)) { 6760 u8 abs_func_id; 6761 6762 /** 6763 * 4-port mode or 2-port mode we need to turn of master-enable 6764 * for everyone, after that, turn it back on for self. 
6765 * so, we disregard multi-function or not, and always disable 6766 * for all functions on the given path, this means 0,2,4,6 for 6767 * path 0 and 1,3,5,7 for path 1 6768 */ 6769 for (abs_func_id = BP_PATH(bp); 6770 abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) { 6771 if (abs_func_id == BP_ABS_FUNC(bp)) { 6772 REG_WR(bp, 6773 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 6774 1); 6775 continue; 6776 } 6777 6778 bnx2x_pretend_func(bp, abs_func_id); 6779 /* clear pf enable */ 6780 bnx2x_pf_disable(bp); 6781 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 6782 } 6783 } 6784 6785 bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON); 6786 if (CHIP_IS_E1(bp)) { 6787 /* enable HW interrupt from PXP on USDM overflow 6788 bit 16 on INT_MASK_0 */ 6789 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); 6790 } 6791 6792 bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON); 6793 bnx2x_init_pxp(bp); 6794 6795#ifdef __BIG_ENDIAN 6796 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1); 6797 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1); 6798 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1); 6799 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1); 6800 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1); 6801 /* make sure this value is 0 */ 6802 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0); 6803 6804/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */ 6805 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1); 6806 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1); 6807 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1); 6808 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1); 6809#endif 6810 6811 bnx2x_ilt_init_page_size(bp, INITOP_SET); 6812 6813 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp)) 6814 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1); 6815 6816 /* let the HW do it's magic ... 
*/ 6817 msleep(100); 6818 /* finish PXP init */ 6819 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE); 6820 if (val != 1) { 6821 BNX2X_ERR("PXP2 CFG failed\n"); 6822 return -EBUSY; 6823 } 6824 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE); 6825 if (val != 1) { 6826 BNX2X_ERR("PXP2 RD_INIT failed\n"); 6827 return -EBUSY; 6828 } 6829 6830 /* Timers bug workaround E2 only. We need to set the entire ILT to 6831 * have entries with value "0" and valid bit on. 6832 * This needs to be done by the first PF that is loaded in a path 6833 * (i.e. common phase) 6834 */ 6835 if (!CHIP_IS_E1x(bp)) { 6836/* In E2 there is a bug in the timers block that can cause function 6 / 7 6837 * (i.e. vnic3) to start even if it is marked as "scan-off". 6838 * This occurs when a different function (func2,3) is being marked 6839 * as "scan-off". Real-life scenario for example: if a driver is being 6840 * load-unloaded while func6,7 are down. This will cause the timer to access 6841 * the ilt, translate to a logical address and send a request to read/write. 6842 * Since the ilt for the function that is down is not valid, this will cause 6843 * a translation error which is unrecoverable. 6844 * The Workaround is intended to make sure that when this happens nothing fatal 6845 * will occur. The workaround: 6846 * 1. First PF driver which loads on a path will: 6847 * a. After taking the chip out of reset, by using pretend, 6848 * it will write "0" to the following registers of 6849 * the other vnics. 6850 * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); 6851 * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0); 6852 * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0); 6853 * And for itself it will write '1' to 6854 * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable 6855 * dmae-operations (writing to pram for example.) 6856 * note: can be done for only function 6,7 but cleaner this 6857 * way. 6858 * b. Write zero+valid to the entire ILT. 6859 * c. 
Init the first_timers_ilt_entry, last_timers_ilt_entry of 6860 * VNIC3 (of that port). The range allocated will be the 6861 * entire ILT. This is needed to prevent ILT range error. 6862 * 2. Any PF driver load flow: 6863 * a. ILT update with the physical addresses of the allocated 6864 * logical pages. 6865 * b. Wait 20msec. - note that this timeout is needed to make 6866 * sure there are no requests in one of the PXP internal 6867 * queues with "old" ILT addresses. 6868 * c. PF enable in the PGLC. 6869 * d. Clear the was_error of the PF in the PGLC. (could have 6870 * occurred while driver was down) 6871 * e. PF enable in the CFC (WEAK + STRONG) 6872 * f. Timers scan enable 6873 * 3. PF driver unload flow: 6874 * a. Clear the Timers scan_en. 6875 * b. Polling for scan_on=0 for that PF. 6876 * c. Clear the PF enable bit in the PXP. 6877 * d. Clear the PF enable in the CFC (WEAK + STRONG) 6878 * e. Write zero+valid to all ILT entries (The valid bit must 6879 * stay set) 6880 * f. If this is VNIC 3 of a port then also init 6881 * first_timers_ilt_entry to zero and last_timers_ilt_entry 6882 * to the last entry in the ILT. 6883 * 6884 * Notes: 6885 * Currently the PF error in the PGLC is non recoverable. 6886 * In the future the there will be a recovery routine for this error. 6887 * Currently attention is masked. 6888 * Having an MCP lock on the load/unload process does not guarantee that 6889 * there is no Timer disable during Func6/7 enable. This is because the 6890 * Timers scan is currently being cleared by the MCP on FLR. 6891 * Step 2.d can be done only for PF6/7 and the driver can also check if 6892 * there is error before clearing it. But the flow above is simpler and 6893 * more general. 6894 * All ILT entries are written by zero+valid and not just PF6/7 6895 * ILT entries since in the future the ILT entries allocation for 6896 * PF-s might be dynamic. 
6897 */ 6898 struct ilt_client_info ilt_cli; 6899 struct bnx2x_ilt ilt; 6900 memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); 6901 memset(&ilt, 0, sizeof(struct bnx2x_ilt)); 6902 6903 /* initialize dummy TM client */ 6904 ilt_cli.start = 0; 6905 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; 6906 ilt_cli.client_num = ILT_CLIENT_TM; 6907 6908 /* Step 1: set zeroes to all ilt page entries with valid bit on 6909 * Step 2: set the timers first/last ilt entry to point 6910 * to the entire range to prevent ILT range error for 3rd/4th 6911 * vnic (this code assumes existence of the vnic) 6912 * 6913 * both steps performed by call to bnx2x_ilt_client_init_op() 6914 * with dummy TM client 6915 * 6916 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT 6917 * and his brother are split registers 6918 */ 6919 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6)); 6920 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR); 6921 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 6922 6923 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN); 6924 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN); 6925 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1); 6926 } 6927 6928 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0); 6929 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0); 6930 6931 if (!CHIP_IS_E1x(bp)) { 6932 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 : 6933 (CHIP_REV_IS_FPGA(bp) ? 400 : 0); 6934 bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON); 6935 6936 bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON); 6937 6938 /* let the HW do it's magic ... 
*/ 6939 do { 6940 msleep(200); 6941 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE); 6942 } while (factor-- && (val != 1)); 6943 6944 if (val != 1) { 6945 BNX2X_ERR("ATC_INIT failed\n"); 6946 return -EBUSY; 6947 } 6948 } 6949 6950 bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON); 6951 6952 bnx2x_iov_init_dmae(bp); 6953 6954 /* clean the DMAE memory */ 6955 bp->dmae_ready = 1; 6956 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1); 6957 6958 bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON); 6959 6960 bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON); 6961 6962 bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON); 6963 6964 bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON); 6965 6966 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3); 6967 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3); 6968 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3); 6969 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3); 6970 6971 bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON); 6972 6973 /* QM queues pointers table */ 6974 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET); 6975 6976 /* soft reset pulse */ 6977 REG_WR(bp, QM_REG_SOFT_RESET, 1); 6978 REG_WR(bp, QM_REG_SOFT_RESET, 0); 6979 6980 if (CNIC_SUPPORT(bp)) 6981 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON); 6982 6983 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON); 6984 6985 if (!CHIP_REV_IS_SLOW(bp)) 6986 /* enable hw interrupt from doorbell Q */ 6987 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); 6988 6989 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); 6990 6991 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); 6992 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); 6993 6994 if (!CHIP_IS_E1(bp)) 6995 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan); 6996 6997 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) { 6998 if (IS_MF_AFEX(bp)) { 6999 /* configure that VNTag and VLAN headers must be 7000 * received in afex mode 7001 */ 7002 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE); 7003 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA); 7004 REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6); 7005 REG_WR(bp, 
PRS_REG_TAG_ETHERTYPE_0, 0x8926); 7006 REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4); 7007 } else { 7008 /* Bit-map indicating which L2 hdrs may appear 7009 * after the basic Ethernet header 7010 */ 7011 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 7012 bp->path_has_ovlan ? 7 : 6); 7013 } 7014 } 7015 7016 bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON); 7017 bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON); 7018 bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON); 7019 bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON); 7020 7021 if (!CHIP_IS_E1x(bp)) { 7022 /* reset VFC memories */ 7023 REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 7024 VFC_MEMORIES_RST_REG_CAM_RST | 7025 VFC_MEMORIES_RST_REG_RAM_RST); 7026 REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, 7027 VFC_MEMORIES_RST_REG_CAM_RST | 7028 VFC_MEMORIES_RST_REG_RAM_RST); 7029 7030 msleep(20); 7031 } 7032 7033 bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON); 7034 bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON); 7035 bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON); 7036 bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON); 7037 7038 /* sync semi rtc */ 7039 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 7040 0x80000000); 7041 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 7042 0x80000000); 7043 7044 bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON); 7045 bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON); 7046 bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON); 7047 7048 if (!CHIP_IS_E1x(bp)) { 7049 if (IS_MF_AFEX(bp)) { 7050 /* configure that VNTag and VLAN headers must be 7051 * sent in afex mode 7052 */ 7053 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE); 7054 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA); 7055 REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6); 7056 REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926); 7057 REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4); 7058 } else { 7059 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 7060 bp->path_has_ovlan ? 
7 : 6); 7061 } 7062 } 7063 7064 REG_WR(bp, SRC_REG_SOFT_RST, 1); 7065 7066 bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON); 7067 7068 if (CNIC_SUPPORT(bp)) { 7069 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); 7070 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); 7071 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b); 7072 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a); 7073 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116); 7074 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b); 7075 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf); 7076 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); 7077 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f); 7078 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7); 7079 } 7080 REG_WR(bp, SRC_REG_SOFT_RST, 0); 7081 7082 if (sizeof(union cdu_context) != 1024) 7083 /* we currently assume that a context is 1024 bytes */ 7084 dev_alert(&bp->pdev->dev, 7085 "please adjust the size of cdu_context(%ld)\n", 7086 (long)sizeof(union cdu_context)); 7087 7088 bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON); 7089 val = (4 << 24) + (0 << 12) + 1024; 7090 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val); 7091 7092 bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON); 7093 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF); 7094 /* enable context validation interrupt from CFC */ 7095 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); 7096 7097 /* set the thresholds to prevent CFC/CDU race */ 7098 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000); 7099 7100 bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON); 7101 7102 if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp)) 7103 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36); 7104 7105 bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON); 7106 bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON); 7107 7108 /* Reset PCIE errors for debug */ 7109 REG_WR(bp, 0x2814, 0xffffffff); 7110 REG_WR(bp, 0x3820, 0xffffffff); 7111 7112 if (!CHIP_IS_E1x(bp)) { 7113 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, 7114 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | 7115 PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); 7116 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT, 7117 
(PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 | 7118 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 | 7119 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2)); 7120 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT, 7121 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 | 7122 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 | 7123 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); 7124 } 7125 7126 bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON); 7127 if (!CHIP_IS_E1(bp)) { 7128 /* in E3 this done in per-port section */ 7129 if (!CHIP_IS_E3(bp)) 7130 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp)); 7131 } 7132 if (CHIP_IS_E1H(bp)) 7133 /* not applicable for E2 (and above ...) */ 7134 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp)); 7135 7136 if (CHIP_REV_IS_SLOW(bp)) 7137 msleep(200); 7138 7139 /* finish CFC init */ 7140 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10); 7141 if (val != 1) { 7142 BNX2X_ERR("CFC LL_INIT failed\n"); 7143 return -EBUSY; 7144 } 7145 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10); 7146 if (val != 1) { 7147 BNX2X_ERR("CFC AC_INIT failed\n"); 7148 return -EBUSY; 7149 } 7150 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10); 7151 if (val != 1) { 7152 BNX2X_ERR("CFC CAM_INIT failed\n"); 7153 return -EBUSY; 7154 } 7155 REG_WR(bp, CFC_REG_DEBUG0, 0); 7156 7157 if (CHIP_IS_E1(bp)) { 7158 /* read NIG statistic 7159 to see if this is our first up since powerup */ 7160 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); 7161 val = *bnx2x_sp(bp, wb_data[0]); 7162 7163 /* do internal memory self test */ 7164 if ((val == 0) && bnx2x_int_mem_test(bp)) { 7165 BNX2X_ERR("internal mem self test failed\n"); 7166 return -EBUSY; 7167 } 7168 } 7169 7170 bnx2x_setup_fan_failure_detection(bp); 7171 7172 /* clear PXP2 attentions */ 7173 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); 7174 7175 bnx2x_enable_blocks_attention(bp); 7176 bnx2x_enable_blocks_parity(bp); 7177 7178 if (!BP_NOMCP(bp)) { 7179 if (CHIP_IS_E1x(bp)) 7180 bnx2x__common_init_phy(bp); 7181 } else 7182 BNX2X_ERR("Bootcode is missing - can not initialize link\n"); 7183 
7184 return 0; 7185} 7186 7187/** 7188 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase. 7189 * 7190 * @bp: driver handle 7191 */ 7192static int bnx2x_init_hw_common_chip(struct bnx2x *bp) 7193{ 7194 int rc = bnx2x_init_hw_common(bp); 7195 7196 if (rc) 7197 return rc; 7198 7199 /* In E2 2-PORT mode, same ext phy is used for the two paths */ 7200 if (!BP_NOMCP(bp)) 7201 bnx2x__common_init_phy(bp); 7202 7203 return 0; 7204} 7205 7206static int bnx2x_init_hw_port(struct bnx2x *bp) 7207{ 7208 int port = BP_PORT(bp); 7209 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; 7210 u32 low, high; 7211 u32 val, reg; 7212 7213 DP(NETIF_MSG_HW, "starting port init port %d\n", port); 7214 7215 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 7216 7217 bnx2x_init_block(bp, BLOCK_MISC, init_phase); 7218 bnx2x_init_block(bp, BLOCK_PXP, init_phase); 7219 bnx2x_init_block(bp, BLOCK_PXP2, init_phase); 7220 7221 /* Timers bug workaround: disables the pf_master bit in pglue at 7222 * common phase, we need to enable it here before any dmae access are 7223 * attempted. 
Therefore we manually added the enable-master to the 7224 * port phase (it also happens in the function phase) 7225 */ 7226 if (!CHIP_IS_E1x(bp)) 7227 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); 7228 7229 bnx2x_init_block(bp, BLOCK_ATC, init_phase); 7230 bnx2x_init_block(bp, BLOCK_DMAE, init_phase); 7231 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); 7232 bnx2x_init_block(bp, BLOCK_QM, init_phase); 7233 7234 bnx2x_init_block(bp, BLOCK_TCM, init_phase); 7235 bnx2x_init_block(bp, BLOCK_UCM, init_phase); 7236 bnx2x_init_block(bp, BLOCK_CCM, init_phase); 7237 bnx2x_init_block(bp, BLOCK_XCM, init_phase); 7238 7239 /* QM cid (connection) count */ 7240 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET); 7241 7242 if (CNIC_SUPPORT(bp)) { 7243 bnx2x_init_block(bp, BLOCK_TM, init_phase); 7244 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); 7245 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); 7246 } 7247 7248 bnx2x_init_block(bp, BLOCK_DORQ, init_phase); 7249 7250 bnx2x_init_block(bp, BLOCK_BRB1, init_phase); 7251 7252 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) { 7253 7254 if (IS_MF(bp)) 7255 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); 7256 else if (bp->dev->mtu > 4096) { 7257 if (bp->flags & ONE_PORT_FLAG) 7258 low = 160; 7259 else { 7260 val = bp->dev->mtu; 7261 /* (24*1024 + val*4)/256 */ 7262 low = 96 + (val/64) + 7263 ((val % 64) ? 1 : 0); 7264 } 7265 } else 7266 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160); 7267 high = low + 56; /* 14*1024/256 */ 7268 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); 7269 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); 7270 } 7271 7272 if (CHIP_MODE_IS_4_PORT(bp)) 7273 REG_WR(bp, (BP_PORT(bp) ? 7274 BRB1_REG_MAC_GUARANTIED_1 : 7275 BRB1_REG_MAC_GUARANTIED_0), 40); 7276 7277 bnx2x_init_block(bp, BLOCK_PRS, init_phase); 7278 if (CHIP_IS_E3B0(bp)) { 7279 if (IS_MF_AFEX(bp)) { 7280 /* configure headers for AFEX mode */ 7281 REG_WR(bp, BP_PORT(bp) ? 
7282 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 7283 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE); 7284 REG_WR(bp, BP_PORT(bp) ? 7285 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 : 7286 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6); 7287 REG_WR(bp, BP_PORT(bp) ? 7288 PRS_REG_MUST_HAVE_HDRS_PORT_1 : 7289 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA); 7290 } else { 7291 /* Ovlan exists only if we are in multi-function + 7292 * switch-dependent mode, in switch-independent there 7293 * is no ovlan headers 7294 */ 7295 REG_WR(bp, BP_PORT(bp) ? 7296 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 7297 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 7298 (bp->path_has_ovlan ? 7 : 6)); 7299 } 7300 } 7301 7302 bnx2x_init_block(bp, BLOCK_TSDM, init_phase); 7303 bnx2x_init_block(bp, BLOCK_CSDM, init_phase); 7304 bnx2x_init_block(bp, BLOCK_USDM, init_phase); 7305 bnx2x_init_block(bp, BLOCK_XSDM, init_phase); 7306 7307 bnx2x_init_block(bp, BLOCK_TSEM, init_phase); 7308 bnx2x_init_block(bp, BLOCK_USEM, init_phase); 7309 bnx2x_init_block(bp, BLOCK_CSEM, init_phase); 7310 bnx2x_init_block(bp, BLOCK_XSEM, init_phase); 7311 7312 bnx2x_init_block(bp, BLOCK_UPB, init_phase); 7313 bnx2x_init_block(bp, BLOCK_XPB, init_phase); 7314 7315 bnx2x_init_block(bp, BLOCK_PBF, init_phase); 7316 7317 if (CHIP_IS_E1x(bp)) { 7318 /* configure PBF to work without PAUSE mtu 9000 */ 7319 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); 7320 7321 /* update threshold */ 7322 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); 7323 /* update init credit */ 7324 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); 7325 7326 /* probe changes */ 7327 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1); 7328 udelay(50); 7329 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0); 7330 } 7331 7332 if (CNIC_SUPPORT(bp)) 7333 bnx2x_init_block(bp, BLOCK_SRC, init_phase); 7334 7335 bnx2x_init_block(bp, BLOCK_CDU, init_phase); 7336 bnx2x_init_block(bp, BLOCK_CFC, init_phase); 7337 7338 if (CHIP_IS_E1(bp)) { 7339 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 7340 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + 
port*8, 0); 7341 } 7342 bnx2x_init_block(bp, BLOCK_HC, init_phase); 7343 7344 bnx2x_init_block(bp, BLOCK_IGU, init_phase); 7345 7346 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); 7347 /* init aeu_mask_attn_func_0/1: 7348 * - SF mode: bits 3-7 are masked. Only bits 0-2 are in use 7349 * - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF 7350 * bits 4-7 are used for "per vn group attention" */ 7351 val = IS_MF(bp) ? 0xF7 : 0x7; 7352 /* Enable DCBX attention for all but E1 */ 7353 val |= CHIP_IS_E1(bp) ? 0 : 0x10; 7354 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); 7355 7356 /* SCPAD_PARITY should NOT trigger close the gates */ 7357 reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0; 7358 REG_WR(bp, reg, 7359 REG_RD(bp, reg) & 7360 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY); 7361 7362 reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0; 7363 REG_WR(bp, reg, 7364 REG_RD(bp, reg) & 7365 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY); 7366 7367 bnx2x_init_block(bp, BLOCK_NIG, init_phase); 7368 7369 if (!CHIP_IS_E1x(bp)) { 7370 /* Bit-map indicating which L2 hdrs may appear after the 7371 * basic Ethernet header 7372 */ 7373 if (IS_MF_AFEX(bp)) 7374 REG_WR(bp, BP_PORT(bp) ? 7375 NIG_REG_P1_HDRS_AFTER_BASIC : 7376 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE); 7377 else 7378 REG_WR(bp, BP_PORT(bp) ? 7379 NIG_REG_P1_HDRS_AFTER_BASIC : 7380 NIG_REG_P0_HDRS_AFTER_BASIC, 7381 IS_MF_SD(bp) ? 7 : 6); 7382 7383 if (CHIP_IS_E3(bp)) 7384 REG_WR(bp, BP_PORT(bp) ? 7385 NIG_REG_LLH1_MF_MODE : 7386 NIG_REG_LLH_MF_MODE, IS_MF(bp)); 7387 } 7388 if (!CHIP_IS_E3(bp)) 7389 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); 7390 7391 if (!CHIP_IS_E1(bp)) { 7392 /* 0x2 disable mf_ov, 0x1 enable */ 7393 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, 7394 (IS_MF_SD(bp) ? 
0x1 : 0x2));

		if (!CHIP_IS_E1x(bp)) {
			val = 0;
			switch (bp->mf_mode) {
			case MULTI_FUNCTION_SD:
				val = 1;
				break;
			case MULTI_FUNCTION_SI:
			case MULTI_FUNCTION_AFEX:
				val = 2;
				break;
			}

			REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
						  NIG_REG_LLH0_CLS_TYPE), val);
		}
		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	/* If SPIO5 is set to generate interrupts, enable it for this port */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	if (val & MISC_SPIO_SPIO5) {
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
	}

	return 0;
}

/* Write a single ILT (Internal Lookup Table) entry for @index.
 * The 64-bit DMA address is split into two 32-bit halves (ONCHIP_ADDR1/2)
 * and written as one wide-bus DMAE transaction; E1 uses a different
 * on-chip address-table register base than later chips.
 */
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;
	u32 wb_write[2];

	if (CHIP_IS_E1(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
	else
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;

	wb_write[0] = ONCHIP_ADDR1(addr);
	wb_write[1] = ONCHIP_ADDR2(addr);
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

/* Issue an IGU status-block cleanup command (via the GRC command
 * registers) for @idu_sb_id on behalf of function @func, then poll the
 * cleanup-ack bit.  The poll loop allows up to 100 iterations of 20 ms
 * (~2 s) before logging a failure.  @is_pf selects the PF bit in the
 * function-id encoding of the command.
 */
void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
{
	u32 data, ctl, cnt = 100;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
	u32 sb_bit = 1 << (idu_sb_id%32);
	u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;

	/* Not supported in BC mode */
	if (CHIP_INT_MODE_IS_BC(bp))
		return;

	data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
			<< IGU_REGULAR_CLEANUP_TYPE_SHIFT)	|
		IGU_REGULAR_CLEANUP_SET				|
		IGU_REGULAR_BCLEANUP;

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT		|
	      func_encode << IGU_CTRL_REG_FID_SHIFT		|
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	/* data must reach the IGU before the control word triggers the
	 * command, hence the mmiowb()/barrier() pair between the writes
	 */
	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
			 data, igu_addr_data);
	REG_WR(bp, igu_addr_data, data);
	mmiowb();
	barrier();
	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
			  ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();

	/* wait for clean up to finish */
	while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
		msleep(20);

	if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
		DP(NETIF_MSG_HW,
		   "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
		   idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
	}
}

/* PF convenience wrapper: clean up @idu_sb_id for this function */
static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
{
	bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
}

/* Zero every ILT line belonging to function @func */
static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
{
	u32 i, base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

/* Initialize the searcher (SRC block): program the T2 table and the
 * per-port T1 hash configuration.
 */
static void bnx2x_init_searcher(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
	/* T1 hash bits value determines the T1 number of entries */
	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
}

/* Send a SWITCH_UPDATE function ramrod to suspend (@suspend != 0) or
 * resume Tx switching for this PF; waits for ramrod completion and
 * retries on failure (RAMROD_COMP_WAIT | RAMROD_RETRY).
 * Returns 0 on success, negative errno otherwise.
 */
static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
{
	int rc;
	struct bnx2x_func_state_params func_params = {NULL};
	struct
bnx2x_func_switch_update_params *switch_update_params =
		&func_params.params.switch_update;

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;

	/* Function parameters */
	switch_update_params->suspend = suspend;

	rc = bnx2x_func_state_change(bp, &func_params);

	return rc;
}

/* Turn NIC_MODE off (PRS_REG_NIC_MODE = 0) safely: close all network
 * input paths, suspend Tx switching so the parser drains, flip the
 * register, then reopen everything and resume Tx switching.
 * Returns 0 on success, negative errno if a switch-update ramrod fails
 * (in which case the input paths are left in their current state).
 */
static int bnx2x_reset_nic_mode(struct bnx2x *bp)
{
	int rc, i, port = BP_PORT(bp);
	int vlan_en = 0, mac_en[NUM_MACS];

	/* Close input from network */
	if (bp->mf_mode == SINGLE_FUNCTION) {
		bnx2x_set_rx_filter(&bp->link_params, 0);
	} else {
		/* save current enables so they can be restored below */
		vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
				   NIG_REG_LLH0_FUNC_EN);
		REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
			   NIG_REG_LLH0_FUNC_EN, 0);
		for (i = 0; i < NUM_MACS; i++) {
			mac_en[i] = REG_RD(bp, port ?
					     (NIG_REG_LLH1_FUNC_MEM_ENABLE +
					      4 * i) :
					     (NIG_REG_LLH0_FUNC_MEM_ENABLE +
					      4 * i));
			REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
					   4 * i) :
				   (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
		}
	}

	/* Close BMC to host */
	/* NOTE(review): "port ? P0 : P1" is inverted relative to the
	 * "port ? *1 : *0" pattern used for the LLH registers above —
	 * confirm against the register spec whether this is intentional.
	 */
	REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
		   NIG_REG_P1_TX_MNG_HOST_ENABLE, 0);

	/* Suspend Tx switching to the PF. Completion of this ramrod
	 * further guarantees that all the packets of that PF / child
	 * VFs in BRB were processed by the Parser, so it is safe to
	 * change the NIC_MODE register.
	 */
	rc = bnx2x_func_switch_update(bp, 1);
	if (rc) {
		BNX2X_ERR("Can't suspend tx-switching!\n");
		return rc;
	}

	/* Change NIC_MODE register */
	REG_WR(bp, PRS_REG_NIC_MODE, 0);

	/* Open input from network */
	if (bp->mf_mode == SINGLE_FUNCTION) {
		bnx2x_set_rx_filter(&bp->link_params, 1);
	} else {
		REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
			   NIG_REG_LLH0_FUNC_EN, vlan_en);
		for (i = 0; i < NUM_MACS; i++) {
			REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
					   4 * i) :
				   (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
				   mac_en[i]);
		}
	}

	/* Enable BMC to host */
	/* NOTE(review): same apparently inverted P0/P1 selection as the
	 * "Close BMC to host" write above; at least it is self-consistent.
	 */
	REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
	       NIG_REG_P1_TX_MNG_HOST_ENABLE, 1);

	/* Resume Tx switching to the PF */
	rc = bnx2x_func_switch_update(bp, 0);
	if (rc) {
		BNX2X_ERR("Can't resume tx-switching!\n");
		return rc;
	}

	DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
	return 0;
}

/* CNIC part of per-function HW init: program the CNIC ILT lines and,
 * when NIC mode is configured, set up the searcher and clear NIC_MODE.
 * Returns 0 on success, negative errno on failure.
 */
int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
{
	int rc;

	bnx2x_ilt_init_op_cnic(bp, INITOP_SET);

	if (CONFIGURE_NIC_MODE(bp)) {
		/* Configure searcher as part of function hw init */
		bnx2x_init_searcher(bp);

		/* Reset NIC mode */
		rc = bnx2x_reset_nic_mode(bp);
		if (rc)
			BNX2X_ERR("Can't change NIC mode!\n");
		return rc;
	}

	return 0;
}

/* Per-function (PF) hardware init phase: runs after the common and
 * per-port phases.  Programs the function's ILT, IGU/HC interrupt
 * resources and all per-function HW blocks.  Order of the register
 * writes below is significant.  Returns 0 on success, negative errno
 * on FLR-cleanup failure.
 */
static int bnx2x_init_hw_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int init_phase = PHASE_PF0 + func;
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 cdu_ilt_start;
	u32 addr, val;
	u32 main_mem_base, main_mem_size, main_mem_prty_clr;
	int i, main_mem_width, rc;

	DP(NETIF_MSG_HW, "starting func init func %d\n", func);

	/* FLR cleanup - hmmm */
	if (!CHIP_IS_E1x(bp)) {
		rc = bnx2x_pf_flr_clnup(bp);
		if (rc) {
			bnx2x_fw_dump(bp);
			return rc;
		}
	}
	/* set MSI reconfigure capability */
	if (bp->common.int_block == INT_BLOCK_HC) {
		addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
		val = REG_RD(bp, addr);
		val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
		REG_WR(bp, addr, val);
	}

	bnx2x_init_block(bp, BLOCK_PXP, init_phase);
	bnx2x_init_block(bp, BLOCK_PXP2, init_phase);

	ilt = BP_ILT(bp);
	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;

	if (IS_SRIOV(bp))
		cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS;
	cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start);

	/* since BNX2X_FIRST_VF_CID > 0 the PF L2 cids precedes
	 * those of the VFs, so start line should be reset
	 */
	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
	for (i = 0; i < L2_ILT_LINES(bp); i++) {
		ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
		ilt->lines[cdu_ilt_start + i].page_mapping =
			bp->context[i].cxt_mapping;
		ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
	}

	bnx2x_ilt_init_op(bp, INITOP_SET);

	if (!CONFIGURE_NIC_MODE(bp)) {
		bnx2x_init_searcher(bp);
		REG_WR(bp, PRS_REG_NIC_MODE, 0);
		DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
	} else {
		/* Set NIC mode */
		REG_WR(bp, PRS_REG_NIC_MODE, 1);
		DP(NETIF_MSG_IFUP, "NIC MODE configured\n");
	}

	if (!CHIP_IS_E1x(bp)) {
		u32 pf_conf = IGU_PF_CONF_FUNC_EN;

		/* Turn on a single ISR mode in IGU if driver is going to use
		 * INT#x or MSI
		 */
		if (!(bp->flags & USING_MSIX_FLAG))
			pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		/*
		 * Timers workaround bug: function init part.
		 * Need to wait 20msec after initializing ILT,
		 * needed to make sure there are no requests in
		 * one of the PXP internal queues with "old" ILT addresses
		 */
		msleep(20);
		/*
		 * Master enable - Due to WB DMAE writes performed before this
		 * register is re-initialized as part of the regular function
		 * init
		 */
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
		/* Enable the function in IGU */
		REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
	}

	/* From this point on DMAE transactions are allowed */
	bp->dmae_ready = 1;

	bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);

	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);

	bnx2x_init_block(bp, BLOCK_ATC, init_phase);
	bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
	bnx2x_init_block(bp, BLOCK_NIG, init_phase);
	bnx2x_init_block(bp, BLOCK_SRC, init_phase);
	bnx2x_init_block(bp, BLOCK_MISC, init_phase);
	bnx2x_init_block(bp, BLOCK_TCM, init_phase);
	bnx2x_init_block(bp, BLOCK_UCM, init_phase);
	bnx2x_init_block(bp, BLOCK_CCM, init_phase);
	bnx2x_init_block(bp, BLOCK_XCM, init_phase);
	bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
	bnx2x_init_block(bp, BLOCK_USEM, init_phase);
	bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
	bnx2x_init_block(bp, BLOCK_XSEM, init_phase);

	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, QM_REG_PF_EN, 1);

	if (!CHIP_IS_E1x(bp)) {
		REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
		REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
		REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
		REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
	}
	bnx2x_init_block(bp, BLOCK_QM, init_phase);

	bnx2x_init_block(bp, BLOCK_TM, init_phase);
	bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
	REG_WR(bp, DORQ_REG_MODE_ACT, 1); /* no dpm */

	bnx2x_iov_init_dq(bp);

	bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
	bnx2x_init_block(bp, BLOCK_PRS, init_phase);
	bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
	bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
	bnx2x_init_block(bp, BLOCK_USDM, init_phase);
	bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
	bnx2x_init_block(bp, BLOCK_UPB, init_phase);
	bnx2x_init_block(bp, BLOCK_XPB, init_phase);
	bnx2x_init_block(bp, BLOCK_PBF, init_phase);
	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, PBF_REG_DISABLE_PF, 0);

	bnx2x_init_block(bp, BLOCK_CDU, init_phase);

	bnx2x_init_block(bp, BLOCK_CFC, init_phase);

	if (!CHIP_IS_E1x(bp))
		REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);

	if (IS_MF(bp)) {
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
	}

	bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);

	/* HC init per function */
	if (bp->common.int_block == INT_BLOCK_HC) {
		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

			REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
			REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
		}
		bnx2x_init_block(bp, BLOCK_HC, init_phase);

	} else {
		int num_segs, sb_idx, prod_offset;

		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		if (!CHIP_IS_E1x(bp)) {
			REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
			REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
		}

		bnx2x_init_block(bp, BLOCK_IGU, init_phase);

		if (!CHIP_IS_E1x(bp)) {
			int dsb_idx = 0;
			/*
			 * Producer memory:
			 * E2 mode: address 0-135 match to the mapping memory;
			 * 136 - PF0 default prod; 137 - PF1 default prod;
			 * 138 - PF2 default prod; 139 - PF3 default prod;
			 * 140 - PF0 attn prod; 141 - PF1 attn prod;
			 * 142 - PF2 attn prod; 143 - PF3 attn prod;
			 * 144-147 reserved.
			 *
			 * E1.5 mode - In backward compatible mode;
			 * for non default SB; each even line in the memory
			 * holds the U producer and each odd line hold
			 * the C producer. The first 128 producers are for
			 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
			 * producers are for the DSB for each PF.
			 * Each PF has five segments: (the order inside each
			 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
			 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
			 * 144-147 attn prods;
			 */
			/* non-default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
			for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
				prod_offset = (bp->igu_base_sb + sb_idx) *
					num_segs;

				/* zero all producers of this SB */
				for (i = 0; i < num_segs; i++) {
					addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i) * 4;
					REG_WR(bp, addr, 0);
				}
				/* send consumer update with value 0 */
				bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_igu_clear_sb(bp,
						   bp->igu_base_sb + sb_idx);
			}

			/* default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;

			if (CHIP_MODE_IS_4_PORT(bp))
				dsb_idx = BP_FUNC(bp);
			else
				dsb_idx = BP_VN(bp);

			prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
				       IGU_BC_BASE_DSB_PROD + dsb_idx :
				       IGU_NORM_BASE_DSB_PROD + dsb_idx);

			/*
			 * igu prods come in chunks of E1HVN_MAX (4) -
			 * it does not matter what the current chip mode is
			 */
			for (i = 0; i < (num_segs * E1HVN_MAX);
			     i += E1HVN_MAX) {
				addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i)*4;
				REG_WR(bp, addr, 0);
			}
			/* send consumer update with 0 */
			if (CHIP_INT_MODE_IS_BC(bp)) {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     CSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     XSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     TSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			} else {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			}
			bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);

			/* !!! These should become driver const once
			   rf-tool supports split-68 const */
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
		}
	}

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	if (CHIP_IS_E1x(bp)) {
		main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
		main_mem_base = HC_REG_MAIN_MEMORY +
				BP_PORT(bp) * (main_mem_size * 4);
		main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
		main_mem_width = 8;

		val = REG_RD(bp, main_mem_prty_clr);
		if (val)
			DP(NETIF_MSG_HW,
			   "Hmmm... Parity errors in HC block during function init (0x%x)!\n",
			   val);

		/* Clear "false" parity errors in MSI-X table */
		for (i = main_mem_base;
		     i < main_mem_base + main_mem_size * 4;
		     i += main_mem_width) {
			/* read-modify-write each entry to refresh parity */
			bnx2x_read_dmae(bp, i, main_mem_width / 4);
			bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
					 i, main_mem_width / 4);
		}
		/* Clear HC parity attention */
		REG_RD(bp, main_mem_prty_clr);
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Enable STORMs SP logging */
	REG_WR8(bp, BAR_USTRORM_INTMEM +
	       USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
	REG_WR8(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
	REG_WR8(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
	REG_WR8(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
#endif

	bnx2x_phy_probe(&bp->link_params);

	return 0;
}

/* Release CNIC-only DMA memory: CNIC ILT pages, the CNIC status block
 * (E2 vs E1x layout) and the searcher T2 table.
 */
void bnx2x_free_mem_cnic(struct bnx2x *bp)
{
	bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);

	if (!CHIP_IS_E1x(bp))
		BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e1x));

	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
}

/* Release all non-CNIC driver DMA memory.  VFs only own the FW stats
 * buffer; everything else (status blocks, slowpath, CDU contexts, ILT,
 * SPQ, EQ, IOV memory) is PF-only and skipped for VFs.
 */
void bnx2x_free_mem(struct bnx2x *bp)
{
	int i;

	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);

	if (IS_VF(bp))
		return;

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_sp_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

	for (i = 0; i < L2_ILT_LINES(bp); i++)
		BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
			       bp->context[i].size);
	bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);

	BNX2X_FREE(bp->ilt->lines);

	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

	BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
		       BCM_PAGE_SIZE * NUM_EQ_PAGES);

	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);

	bnx2x_iov_free_mem(bp);
}

/* Allocate CNIC-only DMA memory (status block, optional searcher T2
 * table, CNIC ILT pages) and publish the drv_info address for L5.
 * Returns 0 on success; on failure frees whatever was allocated and
 * returns -ENOMEM.
 */
int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
{
	if (!CHIP_IS_E1x(bp))
		/* size = the status block + ramrod buffers */
		BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb,
				&bp->cnic_sb_mapping,
				sizeof(struct
				       host_hc_status_block_e1x));

	if (CONFIGURE_NIC_MODE(bp) && !bp->t2)
		/* allocate searcher T2 table, as it wasn't allocated before */
		BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);

	/* write address to which L5 should insert its values */
	bp->cnic_eth_dev.addr_drv_info_to_mcp =
		&bp->slowpath->drv_info_to_mcp;

	if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2x_free_mem_cnic(bp);
	BNX2X_ERR("Can't allocate memory\n");
	return -ENOMEM;
}

/* Allocate all non-CNIC driver DMA memory (default status block,
 * slowpath, CDU contexts, ILT, IOV memory, SPQ and EQ rings).
 * Returns 0 on success; on failure frees everything via
 * bnx2x_free_mem() and returns -ENOMEM.
 */
int bnx2x_alloc_mem(struct bnx2x *bp)
{
	int i, allocated, context_size;

	if (!CONFIGURE_NIC_MODE(bp) && !bp->t2)
		/* allocate searcher T2 table */
		BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_sp_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

	/* Allocate memory for CDU context:
	 * This memory is allocated separately and not in the generic ILT
	 * functions because CDU differs in few aspects:
	 * 1. There are multiple entities allocating memory for context -
	 * 'regular' driver, CNIC and SRIOV driver. Each separately controls
	 * its own ILT lines.
	 * 2. Since CDU page-size is not a single 4KB page (which is the case
	 * for the other ILT clients), to be efficient we want to support
	 * allocation of sub-page-size in the last entry.
	 * 3. Context pointers are used by the driver to pass to FW / update
	 * the context (for the other ILT clients the pointers are used just to
	 * free the memory during unload).
	 */
	context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);

	/* last chunk may be smaller than CDU_ILT_PAGE_SZ (aspect 2 above) */
	for (i = 0, allocated = 0; allocated < context_size; i++) {
		bp->context[i].size = min(CDU_ILT_PAGE_SZ,
					  (context_size - allocated));
		BNX2X_PCI_ALLOC(bp->context[i].vcxt,
				&bp->context[i].cxt_mapping,
				bp->context[i].size);
		allocated += bp->context[i].size;
	}
	BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);

	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
		goto alloc_mem_err;

	if (bnx2x_iov_alloc_mem(bp))
		goto alloc_mem_err;

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	/* EQ */
	BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
			BCM_PAGE_SIZE * NUM_EQ_PAGES);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	BNX2X_ERR("Can't allocate memory\n");
	return -ENOMEM;
}

/*
 * Init service functions
 */

/* Add (@set true) or delete (@set false) a single MAC classification
 * rule on @obj via a vlan/mac ramrod.  @ramrod_flags controls
 * completion semantics; when RAMROD_CONT is set the pending command is
 * continued and no new user request is filled in.  Returns 0 on
 * success (-EEXIST on ADD is treated as success), negative errno
 * otherwise.
 */
int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
		      struct bnx2x_vlan_mac_obj *obj, bool set,
		      int mac_type, unsigned long *ramrod_flags)
{
	int rc;
	struct bnx2x_vlan_mac_ramrod_params ramrod_param;

	memset(&ramrod_param, 0, sizeof(ramrod_param));

	/* Fill general parameters */
	ramrod_param.vlan_mac_obj = obj;
	ramrod_param.ramrod_flags = *ramrod_flags;

	/* Fill a user request section if needed */
	if (!test_bit(RAMROD_CONT, ramrod_flags)) {
		memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);

		__set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);

		/* Set the command: ADD or DEL */
		if (set)
			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
		else
			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
	}

	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);

	if (rc == -EEXIST) {
		DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
		/* do not treat adding same MAC as error */
		rc = 0;
	} else if (rc < 0)
		BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));

	return rc;
}

/* Delete every MAC of @mac_type registered on @mac_obj; optionally
 * wait for the ramrod(s) to complete.  Returns 0 on success, negative
 * errno otherwise.
 */
int bnx2x_del_all_macs(struct bnx2x *bp,
		       struct bnx2x_vlan_mac_obj *mac_obj,
		       int mac_type, bool wait_for_comp)
{
	int rc;
	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;

	/* Wait for completion of requested */
	if (wait_for_comp)
		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);

	/* Set the mac type of addresses we want to clear */
	__set_bit(mac_type, &vlan_mac_flags);

	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
	if (rc < 0)
		BNX2X_ERR("Failed to delete MACs: %d\n", rc);

	return rc;
}

/* Configure the device's primary Ethernet MAC: PFs go through the
 * vlan/mac ramrod machinery, VFs ask the PF over the VF-PF channel.
 * A zero MAC is silently accepted in storage-only MF modes.
 * Returns 0 on success, negative errno otherwise.
 */
int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
{
	if (is_zero_ether_addr(bp->dev->dev_addr) &&
	    (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
		DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
		   "Ignoring Zero MAC for STORAGE SD mode\n");
		return 0;
	}

	if (IS_PF(bp)) {
		unsigned long ramrod_flags = 0;

		DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
		return bnx2x_set_mac_one(bp, bp->dev->dev_addr,
					 &bp->sp_objs->mac_obj, set,
					 BNX2X_ETH_MAC, &ramrod_flags);
	} else { /* vf */
		return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
					     bp->fp->index, true);
	}
}
/* Bring up the leading (index 0) queue; VFs do it over the VF-PF
 * channel.  Returns 0 on success, negative errno otherwise.
 */
int bnx2x_setup_leading(struct bnx2x *bp)
{
	if (IS_PF(bp))
		return bnx2x_setup_queue(bp, &bp->fp[0], true);
	else /* VF */
		return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true);
}

/**
 * bnx2x_set_int_mode - configure interrupt mode
 *
 * @bp:		driver handle
 *
 * In case of MSI-X it will also try to enable MSI-X.
 * Falls back MSI-X -> MSI -> INTx, shrinking to a single ethernet
 * queue on fallback.  Returns 0 on success, -EINVAL on a bad
 * int_mode module parameter or an unsupported VF mode.
 */
int bnx2x_set_int_mode(struct bnx2x *bp)
{
	int rc = 0;

	/* VFs are only functional with MSI-X */
	if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) {
		BNX2X_ERR("VF not loaded since interrupt mode not msix\n");
		return -EINVAL;
	}

	switch (int_mode) {
	case BNX2X_INT_MODE_MSIX:
		/* attempt to enable msix */
		rc = bnx2x_enable_msix(bp);

		/* msix attained */
		if (!rc)
			return 0;

		/* vfs use only msix */
		if (rc && IS_VF(bp))
			return rc;

		/* failed to enable multiple MSI-X */
		BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
			       bp->num_queues,
			       1 + bp->num_cnic_queues);

		/* falling through... */
	case BNX2X_INT_MODE_MSI:
		bnx2x_enable_msi(bp);

		/* falling through... */
	case BNX2X_INT_MODE_INTX:
		bp->num_ethernet_queues = 1;
		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
		BNX2X_DEV_INFO("set number of queues to 1\n");
		break;
	default:
		BNX2X_DEV_INFO("unknown value in int_mode module parameter\n");
		return -EINVAL;
	}
	return 0;
}

/* must be called prior to any HW initializations */
/* Number of ILT lines needed for L2 CIDs; with SRIOV the VF CID range
 * is accounted for as well.
 */
static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
{
	if (IS_SRIOV(bp))
		return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS;
	return L2_ILT_LINES(bp);
}

/* Lay out this function's ILT line ranges for the CDU, QM, SRC and TM
 * clients (the latter two only with CNIC support) and sanity-check the
 * total against ILT_MAX_LINES.
 */
void bnx2x_ilt_set_info(struct bnx2x *bp)
{
	struct ilt_client_info *ilt_client;
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 line = 0;

	ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
	DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);

	/* CDU */
	ilt_client = &ilt->clients[ILT_CLIENT_CDU];
	ilt_client->client_num = ILT_CLIENT_CDU;
	ilt_client->page_size = CDU_ILT_PAGE_SZ;
	/* CDU memory is allocated separately (see bnx2x_alloc_mem) */
	ilt_client->flags = ILT_CLIENT_SKIP_MEM;
	ilt_client->start = line;
	line += bnx2x_cid_ilt_lines(bp);

	if (CNIC_SUPPORT(bp))
		line += CNIC_ILT_LINES;
	ilt_client->end = line - 1;

	DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

	/* QM */
	if (QM_INIT(bp->qm_cid_count)) {
		ilt_client = &ilt->clients[ILT_CLIENT_QM];
		ilt_client->client_num = ILT_CLIENT_QM;
		ilt_client->page_size = QM_ILT_PAGE_SZ;
		ilt_client->flags = 0;
		ilt_client->start = line;

		/* 4 bytes for each cid */
		line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
				     QM_ILT_PAGE_SZ);

		ilt_client->end = line - 1;

		DP(NETIF_MSG_IFUP,
		   "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
		   ilt_client->start,
		   ilt_client->end,
		   ilt_client->page_size,
		   ilt_client->flags,
		   ilog2(ilt_client->page_size >> 12));
	}

	if (CNIC_SUPPORT(bp)) {
		/* SRC */
		ilt_client = &ilt->clients[ILT_CLIENT_SRC];
		ilt_client->client_num = ILT_CLIENT_SRC;
		ilt_client->page_size = SRC_ILT_PAGE_SZ;
		ilt_client->flags = 0;
		ilt_client->start = line;
		line += SRC_ILT_LINES;
		ilt_client->end = line - 1;

		DP(NETIF_MSG_IFUP,
		   "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
		   ilt_client->start,
		   ilt_client->end,
		   ilt_client->page_size,
		   ilt_client->flags,
		   ilog2(ilt_client->page_size >> 12));

		/* TM */
		ilt_client = &ilt->clients[ILT_CLIENT_TM];
		ilt_client->client_num = ILT_CLIENT_TM;
		ilt_client->page_size = TM_ILT_PAGE_SZ;
		ilt_client->flags = 0;
		ilt_client->start = line;
		line += TM_ILT_LINES;
		ilt_client->end = line - 1;

		DP(NETIF_MSG_IFUP,
		   "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
		   ilt_client->start,
		   ilt_client->end,
		   ilt_client->page_size,
		   ilt_client->flags,
		   ilog2(ilt_client->page_size >> 12));
	}

	BUG_ON(line > ILT_MAX_LINES);
}

/**
 * bnx2x_pf_q_prep_init - prepare INIT transition parameters
 *
 * @bp:		driver handle
 * @fp:		pointer to fastpath
 * @init_params: pointer to parameters structure
 *
 * parameters configured:
 * - HC configuration
 * - Queue's CDU context
 */
static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
{
	u8 cos;
	int cxt_index, cxt_offset;

	/* FCoE Queue uses Default SB, thus has no HC capabilities */
	if (!IS_FCOE_FP(fp)) {
		__set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
		__set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);

		/* If HC is supported, enable host coalescing in the transition
		 * to INIT state.
		 */
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);

		/* HC rate (interrupts/sec); ticks of 0 disables coalescing */
		init_params->rx.hc_rate = bp->rx_ticks ?
			(1000000 / bp->rx_ticks) : 0;
		init_params->tx.hc_rate = bp->tx_ticks ?
			(1000000 / bp->tx_ticks) : 0;

		/* FW SB ID */
		init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
			fp->fw_sb_id;

		/*
		 * CQ index among the SB indices: FCoE clients uses the default
		 * SB, therefore it's different.
		 */
		init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
		init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
	}

	/* set maximum number of COSs supported by this queue */
	init_params->max_cos = fp->max_cos;

	DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n",
	   fp->index, init_params->max_cos);

	/* set the context pointers queue object */
	for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
		/* locate the CDU context page/offset for this cos's cid */
		cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
		cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
				ILT_PAGE_CIDS);
		init_params->cxts[cos] =
			&bp->context[cxt_index].vcxt[cxt_offset].eth;
	}
}

/* Prepare and send a SETUP_TX_ONLY ramrod for the secondary (tx-only)
 * connection at @tx_index of @fp.  Returns the queue state-change
 * result: 0 on success, negative errno otherwise.
 */
static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			struct bnx2x_queue_state_params *q_params,
			struct bnx2x_queue_setup_tx_only_params *tx_only_params,
			int tx_index, bool leading)
{
	memset(tx_only_params, 0, sizeof(*tx_only_params));

	/* Set the command */
	q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;

	/* Set tx-only QUEUE flags: don't zero statistics */
	tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);

	/* choose the index of the cid to send the slow path on */
	tx_only_params->cid_index = tx_index;

	/* Set general TX_ONLY_SETUP parameters */
	bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);

	/* Set Tx TX_ONLY_SETUP parameters */
	bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);

	DP(NETIF_MSG_IFUP,
	   "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
	   tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
	   q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
	   tx_only_params->gen_params.spcl_id, tx_only_params->flags);

	/* send the ramrod */
	return bnx2x_queue_state_change(bp, q_params);
}

/**
 * bnx2x_setup_queue - setup queue
 *
 * @bp:		driver handle
 * @fp:		pointer to fastpath
 * @leading:	is leading
 *
 * This function performs 2 steps in a Queue state machine
 * actually: 1) RESET->INIT 2) INIT->SETUP
 * plus a SETUP_TX_ONLY transition for each additional tx-only cos.
 * Returns 0 on success, negative errno otherwise.
 */

int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		       bool leading)
{
	struct bnx2x_queue_state_params q_params = {NULL};
	struct bnx2x_queue_setup_params *setup_params =
						&q_params.params.setup;
	struct bnx2x_queue_setup_tx_only_params *tx_only_params =
						&q_params.params.tx_only;
	int rc;
	u8 tx_index;

	DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index);

	/* reset IGU state skip FCoE L2 queue */
	if (!IS_FCOE_FP(fp))
		bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
			     IGU_INT_ENABLE, 0);

	q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
	/* We want to wait for completion in this context */
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

	/* Prepare the INIT parameters */
	bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);

	/* Set the command */
	q_params.cmd = BNX2X_Q_CMD_INIT;

	/* Change the state to INIT */
	rc = bnx2x_queue_state_change(bp, &q_params);
	if (rc) {
		BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
		return rc;
	}

	DP(NETIF_MSG_IFUP, "init complete\n");

	/* Now move the Queue to the SETUP state... */
	memset(setup_params, 0, sizeof(*setup_params));

	/* Set QUEUE flags */
	setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);

	/* Set general SETUP parameters */
	bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
				FIRST_TX_COS_INDEX);

	bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
			    &setup_params->rxq_params);

	bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
			   FIRST_TX_COS_INDEX);

	/* Set the command */
	q_params.cmd = BNX2X_Q_CMD_SETUP;

	if (IS_FCOE_FP(fp))
		bp->fcoe_init = true;

	/* Change the state to SETUP */
	rc = bnx2x_queue_state_change(bp, &q_params);
	if (rc) {
		BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
		return rc;
	}

	/* loop through the relevant tx-only indices */
	for (tx_index = FIRST_TX_ONLY_COS_INDEX;
	      tx_index < fp->max_cos;
	      tx_index++) {

		/* prepare and send tx-only ramrod*/
		rc = bnx2x_setup_tx_only(bp, fp, &q_params,
					  tx_only_params, tx_index, leading);
		if (rc) {
			BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
				  fp->index, tx_index);
			return rc;
		}
	}

	return rc;
}

/* Tear down queue @index: halt and terminate its tx-only connections
 * first, then the primary connection (continues past this window).
 */
static int bnx2x_stop_queue(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	struct bnx2x_fp_txdata *txdata;
	struct bnx2x_queue_state_params q_params = {NULL};
	int rc, tx_index;

	DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);

	q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
	/* We want to wait for completion in this context */
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

	/* close tx-only connections */
	for (tx_index = FIRST_TX_ONLY_COS_INDEX;
	     tx_index < fp->max_cos;
	     tx_index++){

		/* ascertain
this is a normal queue*/ 8529 txdata = fp->txdata_ptr[tx_index]; 8530 8531 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n", 8532 txdata->txq_index); 8533 8534 /* send halt terminate on tx-only connection */ 8535 q_params.cmd = BNX2X_Q_CMD_TERMINATE; 8536 memset(&q_params.params.terminate, 0, 8537 sizeof(q_params.params.terminate)); 8538 q_params.params.terminate.cid_index = tx_index; 8539 8540 rc = bnx2x_queue_state_change(bp, &q_params); 8541 if (rc) 8542 return rc; 8543 8544 /* send halt terminate on tx-only connection */ 8545 q_params.cmd = BNX2X_Q_CMD_CFC_DEL; 8546 memset(&q_params.params.cfc_del, 0, 8547 sizeof(q_params.params.cfc_del)); 8548 q_params.params.cfc_del.cid_index = tx_index; 8549 rc = bnx2x_queue_state_change(bp, &q_params); 8550 if (rc) 8551 return rc; 8552 } 8553 /* Stop the primary connection: */ 8554 /* ...halt the connection */ 8555 q_params.cmd = BNX2X_Q_CMD_HALT; 8556 rc = bnx2x_queue_state_change(bp, &q_params); 8557 if (rc) 8558 return rc; 8559 8560 /* ...terminate the connection */ 8561 q_params.cmd = BNX2X_Q_CMD_TERMINATE; 8562 memset(&q_params.params.terminate, 0, 8563 sizeof(q_params.params.terminate)); 8564 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX; 8565 rc = bnx2x_queue_state_change(bp, &q_params); 8566 if (rc) 8567 return rc; 8568 /* ...delete cfc entry */ 8569 q_params.cmd = BNX2X_Q_CMD_CFC_DEL; 8570 memset(&q_params.params.cfc_del, 0, 8571 sizeof(q_params.params.cfc_del)); 8572 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX; 8573 return bnx2x_queue_state_change(bp, &q_params); 8574} 8575 8576static void bnx2x_reset_func(struct bnx2x *bp) 8577{ 8578 int port = BP_PORT(bp); 8579 int func = BP_FUNC(bp); 8580 int i; 8581 8582 /* Disable the function in the FW */ 8583 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); 8584 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0); 8585 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0); 8586 REG_WR8(bp, BAR_USTRORM_INTMEM + 
USTORM_FUNC_EN_OFFSET(func), 0); 8587 8588 /* FP SBs */ 8589 for_each_eth_queue(bp, i) { 8590 struct bnx2x_fastpath *fp = &bp->fp[i]; 8591 REG_WR8(bp, BAR_CSTRORM_INTMEM + 8592 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), 8593 SB_DISABLED); 8594 } 8595 8596 if (CNIC_LOADED(bp)) 8597 /* CNIC SB */ 8598 REG_WR8(bp, BAR_CSTRORM_INTMEM + 8599 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET 8600 (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED); 8601 8602 /* SP SB */ 8603 REG_WR8(bp, BAR_CSTRORM_INTMEM + 8604 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), 8605 SB_DISABLED); 8606 8607 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) 8608 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 8609 0); 8610 8611 /* Configure IGU */ 8612 if (bp->common.int_block == INT_BLOCK_HC) { 8613 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 8614 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); 8615 } else { 8616 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0); 8617 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); 8618 } 8619 8620 if (CNIC_LOADED(bp)) { 8621 /* Disable Timer scan */ 8622 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); 8623 /* 8624 * Wait for at least 10ms and up to 2 second for the timers 8625 * scan to complete 8626 */ 8627 for (i = 0; i < 200; i++) { 8628 usleep_range(10000, 20000); 8629 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4)) 8630 break; 8631 } 8632 } 8633 /* Clear ILT */ 8634 bnx2x_clear_func_ilt(bp, func); 8635 8636 /* Timers workaround bug for E2: if this is vnic-3, 8637 * we need to set the entire ilt range for this timers. 
8638 */ 8639 if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) { 8640 struct ilt_client_info ilt_cli; 8641 /* use dummy TM client */ 8642 memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); 8643 ilt_cli.start = 0; 8644 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; 8645 ilt_cli.client_num = ILT_CLIENT_TM; 8646 8647 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR); 8648 } 8649 8650 /* this assumes that reset_port() called before reset_func()*/ 8651 if (!CHIP_IS_E1x(bp)) 8652 bnx2x_pf_disable(bp); 8653 8654 bp->dmae_ready = 0; 8655} 8656 8657static void bnx2x_reset_port(struct bnx2x *bp) 8658{ 8659 int port = BP_PORT(bp); 8660 u32 val; 8661 8662 /* Reset physical Link */ 8663 bnx2x__link_reset(bp); 8664 8665 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 8666 8667 /* Do not rcv packets to BRB */ 8668 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); 8669 /* Do not direct rcv packets that are not for MCP to the BRB */ 8670 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : 8671 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); 8672 8673 /* Configure AEU */ 8674 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); 8675 8676 msleep(100); 8677 /* Check for BRB port occupancy */ 8678 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); 8679 if (val) 8680 DP(NETIF_MSG_IFDOWN, 8681 "BRB1 is not empty %d blocks are occupied\n", val); 8682 8683 /* TODO: Close Doorbell port? 
 */
}

/**
 * bnx2x_reset_hw - request an HW_RESET transition from the function FSM.
 *
 * @bp:		driver handle
 * @load_code:	reset scope previously returned by the MCP
 *		(COMMON / PORT / FUNCTION); passed to FW as the load phase
 *		so it knows how much of the HW to reset.
 *
 * Blocks until the ramrod completes (RAMROD_COMP_WAIT is set).
 * Returns 0 on success, negative value on failure.
 */
static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
{
	struct bnx2x_func_state_params func_params = {NULL};

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_HW_RESET;

	func_params.params.hw_init.load_phase = load_code;

	return bnx2x_func_state_change(bp, &func_params);
}

/**
 * bnx2x_func_stop - send a FUNC_STOP ramrod and wait for its completion.
 *
 * @bp:		driver handle
 *
 * Returns 0 on success.  On failure (outside of debug builds) it retries
 * the transition as a driver-only "dry" transaction so that a subsequent
 * HW_RESET transition is still possible.
 */
static int bnx2x_func_stop(struct bnx2x *bp)
{
	struct bnx2x_func_state_params func_params = {NULL};
	int rc;

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_STOP;

	/*
	 * Try to stop the function the 'good way'. If fails (in case
	 * of a parity error during bnx2x_chip_cleanup()) and we are
	 * not in a debug mode, perform a state transaction in order to
	 * enable further HW_RESET transaction.
	 */
	rc = bnx2x_func_state_change(bp, &func_params);
	if (rc) {
#ifdef BNX2X_STOP_ON_ERROR
		/* Debug build: surface the failure instead of papering over it */
		return rc;
#else
		BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n");
		/* DRV_CLR_ONLY: update driver state without sending a ramrod */
		__set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
		return bnx2x_func_state_change(bp, &func_params);
#endif
	}

	return 0;
}

/**
 * bnx2x_send_unload_req - request unload mode from the MCP.
 *
 * @bp:			driver handle
 * @unload_mode:	requested function's unload mode
 *
 * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
8738 */ 8739u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) 8740{ 8741 u32 reset_code = 0; 8742 int port = BP_PORT(bp); 8743 8744 /* Select the UNLOAD request mode */ 8745 if (unload_mode == UNLOAD_NORMAL) 8746 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 8747 8748 else if (bp->flags & NO_WOL_FLAG) 8749 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; 8750 8751 else if (bp->wol) { 8752 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; 8753 u8 *mac_addr = bp->dev->dev_addr; 8754 struct pci_dev *pdev = bp->pdev; 8755 u32 val; 8756 u16 pmc; 8757 8758 /* The mac address is written to entries 1-4 to 8759 * preserve entry 0 which is used by the PMF 8760 */ 8761 u8 entry = (BP_VN(bp) + 1)*8; 8762 8763 val = (mac_addr[0] << 8) | mac_addr[1]; 8764 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val); 8765 8766 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 8767 (mac_addr[4] << 8) | mac_addr[5]; 8768 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); 8769 8770 /* Enable the PME and clear the status */ 8771 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc); 8772 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS; 8773 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc); 8774 8775 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; 8776 8777 } else 8778 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 8779 8780 /* Send the request to the MCP */ 8781 if (!BP_NOMCP(bp)) 8782 reset_code = bnx2x_fw_command(bp, reset_code, 0); 8783 else { 8784 int path = BP_PATH(bp); 8785 8786 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n", 8787 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], 8788 bnx2x_load_count[path][2]); 8789 bnx2x_load_count[path][0]--; 8790 bnx2x_load_count[path][1 + port]--; 8791 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n", 8792 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], 8793 bnx2x_load_count[path][2]); 8794 if (bnx2x_load_count[path][0] == 0) 8795 reset_code = 
FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (bnx2x_load_count[path][1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	return reset_code;
}

/**
 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
 *
 * @bp:		driver handle
 * @keep_link:	true iff link should be kept up
 *
 * No-op when there is no MCP (NO_MCP mode).
 */
void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
{
	u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
}

/**
 * bnx2x_func_wait_started - wait for the function FSM to return to STARTED.
 *
 * @bp:		driver handle
 *
 * Only meaningful on the PMF, which may be in the middle of a DCBx-driven
 * TX disable/enable transaction; polls for up to ~1s (50 x 20ms).
 * Returns 0 on success, negative value on failure.
 */
static int bnx2x_func_wait_started(struct bnx2x *bp)
{
	int tout = 50;
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (!bp->port.pmf)
		return 0;

	/*
	 * (assumption: No Attention from MCP at this stage)
	 * PMF probably in the middle of TX disable/enable transaction
	 * 1. Sync IRS for default SB
	 * 2. Sync SP queue - this guarantees us that attention handling started
	 * 3. Wait, that TX disable/enable transaction completes
	 *
	 * 1+2 guarantee that if DCBx attention was scheduled it already changed
	 * pending bit of transaction from STARTED-->TX_STOPPED, if we already
	 * received completion for the transaction the state is TX_STOPPED.
	 * State will return to STARTED after completion of TX_STOPPED-->STARTED
	 * transaction.
8840 */ 8841 8842 /* make sure default SB ISR is done */ 8843 if (msix) 8844 synchronize_irq(bp->msix_table[0].vector); 8845 else 8846 synchronize_irq(bp->pdev->irq); 8847 8848 flush_workqueue(bnx2x_wq); 8849 8850 while (bnx2x_func_get_state(bp, &bp->func_obj) != 8851 BNX2X_F_STATE_STARTED && tout--) 8852 msleep(20); 8853 8854 if (bnx2x_func_get_state(bp, &bp->func_obj) != 8855 BNX2X_F_STATE_STARTED) { 8856#ifdef BNX2X_STOP_ON_ERROR 8857 BNX2X_ERR("Wrong function state\n"); 8858 return -EBUSY; 8859#else 8860 /* 8861 * Failed to complete the transaction in a "good way" 8862 * Force both transactions with CLR bit 8863 */ 8864 struct bnx2x_func_state_params func_params = {NULL}; 8865 8866 DP(NETIF_MSG_IFDOWN, 8867 "Hmmm... Unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n"); 8868 8869 func_params.f_obj = &bp->func_obj; 8870 __set_bit(RAMROD_DRV_CLR_ONLY, 8871 &func_params.ramrod_flags); 8872 8873 /* STARTED-->TX_ST0PPED */ 8874 func_params.cmd = BNX2X_F_CMD_TX_STOP; 8875 bnx2x_func_state_change(bp, &func_params); 8876 8877 /* TX_ST0PPED-->STARTED */ 8878 func_params.cmd = BNX2X_F_CMD_TX_START; 8879 return bnx2x_func_state_change(bp, &func_params); 8880#endif 8881 } 8882 8883 return 0; 8884} 8885 8886void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) 8887{ 8888 int port = BP_PORT(bp); 8889 int i, rc = 0; 8890 u8 cos; 8891 struct bnx2x_mcast_ramrod_params rparam = {NULL}; 8892 u32 reset_code; 8893 8894 /* Wait until tx fastpath tasks complete */ 8895 for_each_tx_queue(bp, i) { 8896 struct bnx2x_fastpath *fp = &bp->fp[i]; 8897 8898 for_each_cos_in_tx_queue(fp, cos) 8899 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]); 8900#ifdef BNX2X_STOP_ON_ERROR 8901 if (rc) 8902 return; 8903#endif 8904 } 8905 8906 /* Give HW time to discard old tx messages */ 8907 usleep_range(1000, 2000); 8908 8909 /* Clean all ETH MACs */ 8910 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC, 8911 false); 8912 if (rc < 0) 8913 
BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc); 8914 8915 /* Clean up UC list */ 8916 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC, 8917 true); 8918 if (rc < 0) 8919 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n", 8920 rc); 8921 8922 /* Disable LLH */ 8923 if (!CHIP_IS_E1(bp)) 8924 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); 8925 8926 /* Set "drop all" (stop Rx). 8927 * We need to take a netif_addr_lock() here in order to prevent 8928 * a race between the completion code and this code. 8929 */ 8930 netif_addr_lock_bh(bp->dev); 8931 /* Schedule the rx_mode command */ 8932 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) 8933 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); 8934 else 8935 bnx2x_set_storm_rx_mode(bp); 8936 8937 /* Cleanup multicast configuration */ 8938 rparam.mcast_obj = &bp->mcast_obj; 8939 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); 8940 if (rc < 0) 8941 BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc); 8942 8943 netif_addr_unlock_bh(bp->dev); 8944 8945 bnx2x_iov_chip_cleanup(bp); 8946 8947 /* 8948 * Send the UNLOAD_REQUEST to the MCP. This will return if 8949 * this function should perform FUNC, PORT or COMMON HW 8950 * reset. 
8951 */ 8952 reset_code = bnx2x_send_unload_req(bp, unload_mode); 8953 8954 /* 8955 * (assumption: No Attention from MCP at this stage) 8956 * PMF probably in the middle of TX disable/enable transaction 8957 */ 8958 rc = bnx2x_func_wait_started(bp); 8959 if (rc) { 8960 BNX2X_ERR("bnx2x_func_wait_started failed\n"); 8961#ifdef BNX2X_STOP_ON_ERROR 8962 return; 8963#endif 8964 } 8965 8966 /* Close multi and leading connections 8967 * Completions for ramrods are collected in a synchronous way 8968 */ 8969 for_each_eth_queue(bp, i) 8970 if (bnx2x_stop_queue(bp, i)) 8971#ifdef BNX2X_STOP_ON_ERROR 8972 return; 8973#else 8974 goto unload_error; 8975#endif 8976 8977 if (CNIC_LOADED(bp)) { 8978 for_each_cnic_queue(bp, i) 8979 if (bnx2x_stop_queue(bp, i)) 8980#ifdef BNX2X_STOP_ON_ERROR 8981 return; 8982#else 8983 goto unload_error; 8984#endif 8985 } 8986 8987 /* If SP settings didn't get completed so far - something 8988 * very wrong has happen. 8989 */ 8990 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) 8991 BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n"); 8992 8993#ifndef BNX2X_STOP_ON_ERROR 8994unload_error: 8995#endif 8996 rc = bnx2x_func_stop(bp); 8997 if (rc) { 8998 BNX2X_ERR("Function stop failed!\n"); 8999#ifdef BNX2X_STOP_ON_ERROR 9000 return; 9001#endif 9002 } 9003 9004 /* Disable HW interrupts, NAPI */ 9005 bnx2x_netif_stop(bp, 1); 9006 /* Delete all NAPI objects */ 9007 bnx2x_del_all_napi(bp); 9008 if (CNIC_LOADED(bp)) 9009 bnx2x_del_all_napi_cnic(bp); 9010 9011 /* Release IRQs */ 9012 bnx2x_free_irq(bp); 9013 9014 /* Reset the chip */ 9015 rc = bnx2x_reset_hw(bp, reset_code); 9016 if (rc) 9017 BNX2X_ERR("HW_RESET failed\n"); 9018 9019 /* Report UNLOAD_DONE to MCP */ 9020 bnx2x_send_unload_done(bp, keep_link); 9021} 9022 9023void bnx2x_disable_close_the_gate(struct bnx2x *bp) 9024{ 9025 u32 val; 9026 9027 DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n"); 9028 9029 if (CHIP_IS_E1(bp)) { 9030 int port = BP_PORT(bp); 9031 u32 addr = port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : 9032 MISC_REG_AEU_MASK_ATTN_FUNC_0; 9033 9034 val = REG_RD(bp, addr); 9035 val &= ~(0x300); 9036 REG_WR(bp, addr, val); 9037 } else { 9038 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK); 9039 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | 9040 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); 9041 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val); 9042 } 9043} 9044 9045/* Close gates #2, #3 and #4: */ 9046static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) 9047{ 9048 u32 val; 9049 9050 /* Gates #2 and #4a are closed/opened for "not E1" only */ 9051 if (!CHIP_IS_E1(bp)) { 9052 /* #4 */ 9053 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close); 9054 /* #2 */ 9055 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close); 9056 } 9057 9058 /* #3 */ 9059 if (CHIP_IS_E1x(bp)) { 9060 /* Prevent interrupts from HC on both ports */ 9061 val = REG_RD(bp, HC_REG_CONFIG_1); 9062 REG_WR(bp, HC_REG_CONFIG_1, 9063 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) : 9064 (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1)); 9065 9066 val = REG_RD(bp, HC_REG_CONFIG_0); 9067 REG_WR(bp, HC_REG_CONFIG_0, 9068 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) : 9069 (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0)); 9070 } else { 9071 /* Prevent incoming interrupts in IGU */ 9072 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); 9073 9074 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, 9075 (!close) ? 9076 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) : 9077 (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); 9078 } 9079 9080 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n", 9081 close ? "closing" : "opening"); 9082 mmiowb(); 9083} 9084 9085#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */ 9086 9087static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val) 9088{ 9089 /* Do some magic... 
 */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	/* Save the current `magic' bit and then force it on, so the MF
	 * configuration survives the coming MCP reset.
	 */
	*magic_val = val & SHARED_MF_CLP_MAGIC;
	MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
}

/**
 * bnx2x_clp_reset_done - restore the value of the `magic' bit.
 *
 * @bp:		driver handle
 * @magic_val:	old value of the `magic' bit.
 */
static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
{
	/* Restore the `magic' bit value... */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	MF_CFG_WR(bp, shared_mf_config.clp_mb,
		(val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
}

/**
 * bnx2x_reset_mcp_prep - prepare for MCP reset.
 *
 * @bp:		driver handle
 * @magic_val:	old value of 'magic' bit.
 *
 * Takes care of CLP configurations.  Also clears the port's validity map
 * in shmem so that, after the reset, bnx2x_init_shmem() will wait for the
 * MCP to re-assert a fresh validity signature.
 */
static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
{
	u32 shmem;
	u32 validity_offset;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n");

	/* Set `magic' bit in order to save MF config */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_prep(bp, magic_val);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	validity_offset =
		offsetof(struct shmem_region, validity_map[BP_PORT(bp)]);

	/* Clear validity map flags (a zero shmem address means shmem is not
	 * set up, so there is nothing to clear)
	 */
	if (shmem > 0)
		REG_WR(bp, shmem + validity_offset, 0);
}

#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
#define MCP_ONE_TIMEOUT  100    /* 100 ms */

/**
 * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
 *
 * @bp:	driver handle
 */
static void bnx2x_mcp_wait_one(struct bnx2x *bp)
{
	/* special handling for emulation and FPGA,
	   wait 10 times longer */
	if (CHIP_REV_IS_SLOW(bp))
		msleep(MCP_ONE_TIMEOUT*10);
	else
		msleep(MCP_ONE_TIMEOUT);
}

/*
 * initializes bp->common.shmem_base and waits for validity signature to appear
 */
static int
bnx2x_init_shmem(struct bnx2x *bp) 9160{ 9161 int cnt = 0; 9162 u32 val = 0; 9163 9164 do { 9165 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); 9166 if (bp->common.shmem_base) { 9167 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); 9168 if (val & SHR_MEM_VALIDITY_MB) 9169 return 0; 9170 } 9171 9172 bnx2x_mcp_wait_one(bp); 9173 9174 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT)); 9175 9176 BNX2X_ERR("BAD MCP validity signature\n"); 9177 9178 return -ENODEV; 9179} 9180 9181static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val) 9182{ 9183 int rc = bnx2x_init_shmem(bp); 9184 9185 /* Restore the `magic' bit value */ 9186 if (!CHIP_IS_E1(bp)) 9187 bnx2x_clp_reset_done(bp, magic_val); 9188 9189 return rc; 9190} 9191 9192static void bnx2x_pxp_prep(struct bnx2x *bp) 9193{ 9194 if (!CHIP_IS_E1(bp)) { 9195 REG_WR(bp, PXP2_REG_RD_START_INIT, 0); 9196 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0); 9197 mmiowb(); 9198 } 9199} 9200 9201/* 9202 * Reset the whole chip except for: 9203 * - PCIE core 9204 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by 9205 * one reset bit) 9206 * - IGU 9207 * - MISC (including AEU) 9208 * - GRC 9209 * - RBCN, RBCP 9210 */ 9211static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global) 9212{ 9213 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; 9214 u32 global_bits2, stay_reset2; 9215 9216 /* 9217 * Bits that have to be set in reset_mask2 if we want to reset 'global' 9218 * (per chip) blocks. 9219 */ 9220 global_bits2 = 9221 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU | 9222 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE; 9223 9224 /* Don't reset the following blocks. 9225 * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be 9226 * reset, as in 4 port device they might still be owned 9227 * by the MCP (there is only one leader per path). 
9228 */ 9229 not_reset_mask1 = 9230 MISC_REGISTERS_RESET_REG_1_RST_HC | 9231 MISC_REGISTERS_RESET_REG_1_RST_PXPV | 9232 MISC_REGISTERS_RESET_REG_1_RST_PXP; 9233 9234 not_reset_mask2 = 9235 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO | 9236 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE | 9237 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE | 9238 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE | 9239 MISC_REGISTERS_RESET_REG_2_RST_RBCN | 9240 MISC_REGISTERS_RESET_REG_2_RST_GRC | 9241 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE | 9242 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B | 9243 MISC_REGISTERS_RESET_REG_2_RST_ATC | 9244 MISC_REGISTERS_RESET_REG_2_PGLC | 9245 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 | 9246 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 | 9247 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 | 9248 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 | 9249 MISC_REGISTERS_RESET_REG_2_UMAC0 | 9250 MISC_REGISTERS_RESET_REG_2_UMAC1; 9251 9252 /* 9253 * Keep the following blocks in reset: 9254 * - all xxMACs are handled by the bnx2x_link code. 9255 */ 9256 stay_reset2 = 9257 MISC_REGISTERS_RESET_REG_2_XMAC | 9258 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT; 9259 9260 /* Full reset masks according to the chip */ 9261 reset_mask1 = 0xffffffff; 9262 9263 if (CHIP_IS_E1(bp)) 9264 reset_mask2 = 0xffff; 9265 else if (CHIP_IS_E1H(bp)) 9266 reset_mask2 = 0x1ffff; 9267 else if (CHIP_IS_E2(bp)) 9268 reset_mask2 = 0xfffff; 9269 else /* CHIP_IS_E3 */ 9270 reset_mask2 = 0x3ffffff; 9271 9272 /* Don't reset global blocks unless we need to */ 9273 if (!global) 9274 reset_mask2 &= ~global_bits2; 9275 9276 /* 9277 * In case of attention in the QM, we need to reset PXP 9278 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM 9279 * because otherwise QM reset would release 'close the gates' shortly 9280 * before resetting the PXP, then the PSWRQ would send a write 9281 * request to PGLUE. 
Then when PXP is reset, PGLUE would try to 9282 * read the payload data from PSWWR, but PSWWR would not 9283 * respond. The write queue in PGLUE would stuck, dmae commands 9284 * would not return. Therefore it's important to reset the second 9285 * reset register (containing the 9286 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the 9287 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM 9288 * bit). 9289 */ 9290 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 9291 reset_mask2 & (~not_reset_mask2)); 9292 9293 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 9294 reset_mask1 & (~not_reset_mask1)); 9295 9296 barrier(); 9297 mmiowb(); 9298 9299 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 9300 reset_mask2 & (~stay_reset2)); 9301 9302 barrier(); 9303 mmiowb(); 9304 9305 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); 9306 mmiowb(); 9307} 9308 9309/** 9310 * bnx2x_er_poll_igu_vq - poll for pending writes bit. 9311 * It should get cleared in no more than 1s. 9312 * 9313 * @bp: driver handle 9314 * 9315 * It should get cleared in no more than 1s. Returns 0 if 9316 * pending writes bit gets cleared. 
9317 */ 9318static int bnx2x_er_poll_igu_vq(struct bnx2x *bp) 9319{ 9320 u32 cnt = 1000; 9321 u32 pend_bits = 0; 9322 9323 do { 9324 pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS); 9325 9326 if (pend_bits == 0) 9327 break; 9328 9329 usleep_range(1000, 2000); 9330 } while (cnt-- > 0); 9331 9332 if (cnt <= 0) { 9333 BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n", 9334 pend_bits); 9335 return -EBUSY; 9336 } 9337 9338 return 0; 9339} 9340 9341static int bnx2x_process_kill(struct bnx2x *bp, bool global) 9342{ 9343 int cnt = 1000; 9344 u32 val = 0; 9345 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; 9346 u32 tags_63_32 = 0; 9347 9348 /* Empty the Tetris buffer, wait for 1s */ 9349 do { 9350 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT); 9351 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT); 9352 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0); 9353 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1); 9354 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2); 9355 if (CHIP_IS_E3(bp)) 9356 tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32); 9357 9358 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) && 9359 ((port_is_idle_0 & 0x1) == 0x1) && 9360 ((port_is_idle_1 & 0x1) == 0x1) && 9361 (pgl_exp_rom2 == 0xffffffff) && 9362 (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff))) 9363 break; 9364 usleep_range(1000, 2000); 9365 } while (cnt-- > 0); 9366 9367 if (cnt <= 0) { 9368 BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n"); 9369 BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n", 9370 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, 9371 pgl_exp_rom2); 9372 return -EAGAIN; 9373 } 9374 9375 barrier(); 9376 9377 /* Close gates #2, #3 and #4 */ 9378 bnx2x_set_234_gates(bp, true); 9379 9380 /* Poll for IGU VQs for 57712 and newer chips */ 9381 if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp)) 9382 return -EAGAIN; 9383 9384 /* TBD: Indicate that "process 
kill" is in progress to MCP */ 9385 9386 /* Clear "unprepared" bit */ 9387 REG_WR(bp, MISC_REG_UNPREPARED, 0); 9388 barrier(); 9389 9390 /* Make sure all is written to the chip before the reset */ 9391 mmiowb(); 9392 9393 /* Wait for 1ms to empty GLUE and PCI-E core queues, 9394 * PSWHST, GRC and PSWRD Tetris buffer. 9395 */ 9396 usleep_range(1000, 2000); 9397 9398 /* Prepare to chip reset: */ 9399 /* MCP */ 9400 if (global) 9401 bnx2x_reset_mcp_prep(bp, &val); 9402 9403 /* PXP */ 9404 bnx2x_pxp_prep(bp); 9405 barrier(); 9406 9407 /* reset the chip */ 9408 bnx2x_process_kill_chip_reset(bp, global); 9409 barrier(); 9410 9411 /* clear errors in PGB */ 9412 if (!CHIP_IS_E1x(bp)) 9413 REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f); 9414 9415 /* Recover after reset: */ 9416 /* MCP */ 9417 if (global && bnx2x_reset_mcp_comp(bp, val)) 9418 return -EAGAIN; 9419 9420 /* TBD: Add resetting the NO_MCP mode DB here */ 9421 9422 /* Open the gates #2, #3 and #4 */ 9423 bnx2x_set_234_gates(bp, false); 9424 9425 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a 9426 * reset state, re-enable attentions. 
 */

	return 0;
}

/**
 * bnx2x_leader_reset - perform the "process kill" recovery as the leader.
 *
 * @bp:		driver handle
 *
 * When not resetting the MCP, first registers a "fake" driver load with
 * the MCP so this function owns the HW for the duration of the reset,
 * then runs bnx2x_process_kill() and clears the recovery state bits.
 * Always drops leadership before returning.  Returns 0 on success,
 * -EAGAIN on any failure (caller may retry recovery).
 */
static int bnx2x_leader_reset(struct bnx2x *bp)
{
	int rc = 0;
	bool global = bnx2x_reset_is_global(bp);
	u32 load_code;

	/* if not going to reset MCP - load "fake" driver to reset HW while
	 * driver is owner of the HW
	 */
	if (!global && !BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
					     DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EAGAIN;
			goto exit_leader_reset;
		}
		/* The leader must be the only/first loaded function on the
		 * path, so anything but a COMMON response is unexpected.
		 */
		if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
		    (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
			BNX2X_ERR("MCP unexpected resp, aborting\n");
			rc = -EAGAIN;
			goto exit_leader_reset2;
		}
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EAGAIN;
			goto exit_leader_reset2;
		}
	}

	/* Try to recover after the failure */
	if (bnx2x_process_kill(bp, global)) {
		BNX2X_ERR("Something bad had happen on engine %d! Aii!\n",
			  BP_PATH(bp));
		rc = -EAGAIN;
		goto exit_leader_reset2;
	}

	/*
	 * Clear RESET_IN_PROGRES and RESET_GLOBAL bits and update the driver
	 * state.
	 */
	bnx2x_set_reset_done(bp);
	if (global)
		bnx2x_clear_reset_global(bp);

exit_leader_reset2:
	/* unload "fake driver" if it was loaded */
	if (!global && !BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
	}
exit_leader_reset:
	bp->is_leader = 0;
	bnx2x_release_leader_lock(bp);
	/* make "is_leader" visible to other CPUs before returning */
	smp_mb();
	return rc;
}

static void bnx2x_recovery_failed(struct bnx2x *bp)
{
	netdev_err(bp->dev, "Recovery has failed. 
/*
 * Assumption: runs under rtnl lock. This together with the fact
 * that it's called only from bnx2x_sp_rtnl() ensure that it
 * will never be called when netif_running(bp->dev) is false.
 *
 * Parity-recovery state machine. Loops over bp->recovery_state:
 *   BNX2X_RECOVERY_INIT: confirm the parity attention, try to become the
 *     leader (HW leader lock), unload the NIC and move to RECOVERY_WAIT.
 *   BNX2X_RECOVERY_WAIT: as leader, wait for all functions (on both engines
 *     when the reset is global) to unload, then perform the leader reset;
 *     as non-leader, either take over leadership, wait for the reset to be
 *     done, or reload the NIC once it is. Waiting is implemented by
 *     re-scheduling sp_rtnl_task in HZ/10 and returning.
 */
static void bnx2x_parity_recover(struct bnx2x *bp)
{
	bool global = false;
	u32 error_recovered, error_unrecovered;
	bool is_parity;

	DP(NETIF_MSG_HW, "Handling parity\n");
	while (1) {
		switch (bp->recovery_state) {
		case BNX2X_RECOVERY_INIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
			is_parity = bnx2x_chk_parity_attn(bp, &global, false);
			/* we reached this state only due to a parity attn */
			WARN_ON(!is_parity);

			/* Try to get a LEADER_LOCK HW lock */
			if (bnx2x_trylock_leader_lock(bp)) {
				bnx2x_set_reset_in_progress(bp);
				/*
				 * Check if there is a global attention and if
				 * there was a global attention, set the global
				 * reset bit.
				 */
				if (global)
					bnx2x_set_reset_global(bp);

				bp->is_leader = 1;
			}

			/* Stop the driver */
			/* If interface has been removed - break */
			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
				return;

			bp->recovery_state = BNX2X_RECOVERY_WAIT;

			/* Ensure "is_leader", MCP command sequence and
			 * "recovery_state" update values are seen on other
			 * CPUs.
			 */
			smp_mb();
			break;

		case BNX2X_RECOVERY_WAIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
			if (bp->is_leader) {
				int other_engine = BP_PATH(bp) ? 0 : 1;
				bool other_load_status =
					bnx2x_get_load_status(bp, other_engine);
				bool load_status =
					bnx2x_get_load_status(bp, BP_PATH(bp));
				global = bnx2x_reset_is_global(bp);

				/*
				 * In case of a parity in a global block, let
				 * the first leader that performs a
				 * leader_reset() reset the global blocks in
				 * order to clear global attentions. Otherwise
				 * the gates will remain closed for that
				 * engine.
				 */
				if (load_status ||
				    (global && other_load_status)) {
					/* Wait until all other functions get
					 * down.
					 */
					schedule_delayed_work(&bp->sp_rtnl_task,
							      HZ/10);
					return;
				} else {
					/* If all other functions got down -
					 * try to bring the chip back to
					 * normal. In any case it's an exit
					 * point for a leader.
					 */
					if (bnx2x_leader_reset(bp)) {
						bnx2x_recovery_failed(bp);
						return;
					}

					/* If we are here, means that the
					 * leader has succeeded and doesn't
					 * want to be a leader any more. Try
					 * to continue as a none-leader.
					 */
					break;
				}
			} else { /* non-leader */
				if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
					/* Try to get a LEADER_LOCK HW lock as
					 * long as a former leader may have
					 * been unloaded by the user or
					 * released a leadership by another
					 * reason.
					 */
					if (bnx2x_trylock_leader_lock(bp)) {
						/* I'm a leader now! Restart a
						 * switch case.
						 */
						bp->is_leader = 1;
						break;
					}

					schedule_delayed_work(&bp->sp_rtnl_task,
							      HZ/10);
					return;
				} else {
					/*
					 * If there was a global attention, wait
					 * for it to be cleared.
					 */
					if (bnx2x_reset_is_global(bp)) {
						schedule_delayed_work(
							&bp->sp_rtnl_task,
							HZ/10);
						return;
					}

					error_recovered =
					  bp->eth_stats.recoverable_error;
					error_unrecovered =
					  bp->eth_stats.unrecoverable_error;
					bp->recovery_state =
						BNX2X_RECOVERY_NIC_LOADING;
					if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
						error_unrecovered++;
						netdev_err(bp->dev,
							   "Recovery failed. Power cycle needed\n");
						/* Disconnect this device */
						netif_device_detach(bp->dev);
						/* Shut down the power */
						bnx2x_set_power_state(
							bp, PCI_D3hot);
						smp_mb();
					} else {
						bp->recovery_state =
							BNX2X_RECOVERY_DONE;
						error_recovered++;
						smp_mb();
					}
					bp->eth_stats.recoverable_error =
						error_recovered;
					bp->eth_stats.unrecoverable_error =
						error_unrecovered;

					return;
				}
			}
		default:
			return;
		}
	}
}
/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
 * scheduled on a general queue in order to prevent a dead lock.
 *
 * Slow-path work handler that needs the rtnl lock. Dispatches on
 * bp->recovery_state and the bit flags in bp->sp_rtnl_state: parity
 * recovery, TX-timeout reset, TC setup, AFEX update, fan failure,
 * VF-PF channel events, Rx-mode, VF VLAN, DCBX TX stop/resume, and
 * (after dropping rtnl) SR-IOV re-enable.
 */
static void bnx2x_sp_rtnl_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);

	rtnl_lock();

	/* device may have been closed since this work was queued */
	if (!netif_running(bp->dev)) {
		rtnl_unlock();
		return;
	}

	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
#ifdef BNX2X_STOP_ON_ERROR
		BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
			  "you will need to reboot when done\n");
		goto sp_rtnl_not_reset;
#endif
		/*
		 * Clear all pending SP commands as we are going to reset the
		 * function anyway.
		 */
		bp->sp_rtnl_state = 0;
		smp_mb();

		bnx2x_parity_recover(bp);

		rtnl_unlock();
		return;
	}

	if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
#ifdef BNX2X_STOP_ON_ERROR
		BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
			  "you will need to reboot when done\n");
		goto sp_rtnl_not_reset;
#endif

		/*
		 * Clear all pending SP commands as we are going to reset the
		 * function anyway.
		 */
		bp->sp_rtnl_state = 0;
		smp_mb();

		/* full reload to recover from a TX timeout */
		bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
		bnx2x_nic_load(bp, LOAD_NORMAL);

		rtnl_unlock();
		return;
	}
#ifdef BNX2X_STOP_ON_ERROR
sp_rtnl_not_reset:
#endif
	if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
		bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
	if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
		bnx2x_after_function_update(bp);
	/*
	 * in case of fan failure we need to reset id if the "stop on error"
	 * debug flag is set, since we trying to prevent permanent overheating
	 * damage
	 */
	if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
		DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
		netif_device_detach(bp->dev);
		bnx2x_close(bp->dev);
		rtnl_unlock();
		return;
	}

	if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) {
		DP(BNX2X_MSG_SP,
		   "sending set mcast vf pf channel message from rtnl sp-task\n");
		bnx2x_vfpf_set_mcast(bp->dev);
	}
	if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
			       &bp->sp_rtnl_state)){
		if (!test_bit(__LINK_STATE_NOCARRIER, &bp->dev->state)) {
			bnx2x_tx_disable(bp);
			BNX2X_ERR("PF indicated channel is not servicable anymore. This means this VF device is no longer operational\n");
		}
	}

	if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
		DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n");
		bnx2x_set_rx_mode_inner(bp);
	}

	if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
			       &bp->sp_rtnl_state))
		bnx2x_pf_set_vfs_vlan(bp);

	if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) {
		bnx2x_dcbx_stop_hw_tx(bp);
		bnx2x_dcbx_resume_hw_tx(bp);
	}

	/* work which needs rtnl lock not-taken (as it takes the lock itself and
	 * can be called from other contexts as well)
	 */
	rtnl_unlock();

	/* enable SR-IOV if applicable */
	if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
					       &bp->sp_rtnl_state)) {
		bnx2x_disable_sriov(bp);
		bnx2x_enable_sriov(bp);
	}
}
/* Close the MAC Rx path left open by a previous driver/boot agent.
 *
 * Disables Rx on whichever MACs the reset-register state shows as active
 * (BMAC/EMAC on pre-E3 chips, XMAC/UMAC on E3) and records the original
 * register addresses/values in @vals so the caller can restore them after
 * the common reset. An address of 0 in @vals means "not touched".
 * Sleeps 20ms at the end if any MAC was actually stopped, to let
 * in-flight traffic drain.
 */
static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
					struct bnx2x_mac_vals *vals)
{
	u32 val, base_addr, offset, mask, reset_reg;
	bool mac_stopped = false;
	u8 port = BP_PORT(bp);

	/* reset addresses as they also mark which values were changed */
	vals->bmac_addr = 0;
	vals->umac_addr = 0;
	vals->xmac_addr = 0;
	vals->emac_addr = 0;

	reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);

	if (!CHIP_IS_E3(bp)) {
		/* pre-E3: BMAC + EMAC */
		val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
		mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
		/* only touch BMAC if it is out of reset and enabled */
		if ((mask & reset_reg) && val) {
			u32 wb_data[2];
			BNX2X_DEV_INFO("Disable bmac Rx\n");
			base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM
						: NIG_REG_INGRESS_BMAC0_MEM;
			offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL
						: BIGMAC_REGISTER_BMAC_CONTROL;

			/*
			 * use rd/wr since we cannot use dmae. This is safe
			 * since MCP won't access the bus due to the request
			 * to unload, and no function on the path can be
			 * loaded at this time.
			 */
			wb_data[0] = REG_RD(bp, base_addr + offset);
			wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
			vals->bmac_addr = base_addr + offset;
			vals->bmac_val[0] = wb_data[0];
			vals->bmac_val[1] = wb_data[1];
			wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
			REG_WR(bp, vals->bmac_addr, wb_data[0]);
			REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
		}
		BNX2X_DEV_INFO("Disable emac Rx\n");
		vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
		vals->emac_val = REG_RD(bp, vals->emac_addr);
		REG_WR(bp, vals->emac_addr, 0);
		mac_stopped = true;
	} else {
		/* E3: XMAC + UMAC */
		if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
			BNX2X_DEV_INFO("Disable xmac Rx\n");
			base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
			/* pulse PFC_CTRL_HI bit 1 low then high before
			 * clearing the XMAC control register
			 */
			val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI);
			REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
			       val & ~(1 << 1));
			REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
			       val | (1 << 1));
			vals->xmac_addr = base_addr + XMAC_REG_CTRL;
			vals->xmac_val = REG_RD(bp, vals->xmac_addr);
			REG_WR(bp, vals->xmac_addr, 0);
			mac_stopped = true;
		}
		mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
		if (mask & reset_reg) {
			BNX2X_DEV_INFO("Disable umac Rx\n");
			base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
			vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
			vals->umac_val = REG_RD(bp, vals->umac_addr);
			REG_WR(bp, vals->umac_addr, 0);
			mac_stopped = true;
		}
	}

	/* let in-flight Rx traffic drain out of the stopped MACs */
	if (mac_stopped)
		msleep(20);
}
offset macro from the rest of the functions. 9955 * Setting this for all 8 functions is harmless regardless of whether 9956 * this is actually a multi-function device. 9957 */ 9958 for (i = 0; i < 2; i++) 9959 REG_WR(bp, BNX2X_PREV_UNDI_MF_PORT(i), 1); 9960 9961 for (i = 2; i < 8; i++) 9962 REG_WR(bp, BNX2X_PREV_UNDI_MF_FUNC(i - 2), 1); 9963 9964 BNX2X_DEV_INFO("UNDI FW (MF) set to discard\n"); 9965} 9966 9967static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc) 9968{ 9969 u16 rcq, bd; 9970 u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port)); 9971 9972 rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc; 9973 bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc; 9974 9975 tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd); 9976 REG_WR(bp, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg); 9977 9978 BNX2X_DEV_INFO("UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n", 9979 port, bd, rcq); 9980} 9981 9982static int bnx2x_prev_mcp_done(struct bnx2x *bp) 9983{ 9984 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 9985 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET); 9986 if (!rc) { 9987 BNX2X_ERR("MCP response failure, aborting\n"); 9988 return -EBUSY; 9989 } 9990 9991 return 0; 9992} 9993 9994static struct bnx2x_prev_path_list * 9995 bnx2x_prev_path_get_entry(struct bnx2x *bp) 9996{ 9997 struct bnx2x_prev_path_list *tmp_list; 9998 9999 list_for_each_entry(tmp_list, &bnx2x_prev_list, list) 10000 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot && 10001 bp->pdev->bus->number == tmp_list->bus && 10002 BP_PATH(bp) == tmp_list->path) 10003 return tmp_list; 10004 10005 return NULL; 10006} 10007 10008static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp) 10009{ 10010 struct bnx2x_prev_path_list *tmp_list; 10011 int rc; 10012 10013 rc = down_interruptible(&bnx2x_prev_sem); 10014 if (rc) { 10015 BNX2X_ERR("Received %d when tried to take lock\n", rc); 10016 return rc; 10017 } 10018 10019 tmp_list = bnx2x_prev_path_get_entry(bp); 10020 if (tmp_list) { 10021 tmp_list->aer = 1; 10022 rc = 0; 10023 } 
else { 10024 BNX2X_ERR("path %d: Entry does not exist for eeh; Flow occurs before initial insmod is over ?\n", 10025 BP_PATH(bp)); 10026 } 10027 10028 up(&bnx2x_prev_sem); 10029 10030 return rc; 10031} 10032 10033static bool bnx2x_prev_is_path_marked(struct bnx2x *bp) 10034{ 10035 struct bnx2x_prev_path_list *tmp_list; 10036 bool rc = false; 10037 10038 if (down_trylock(&bnx2x_prev_sem)) 10039 return false; 10040 10041 tmp_list = bnx2x_prev_path_get_entry(bp); 10042 if (tmp_list) { 10043 if (tmp_list->aer) { 10044 DP(NETIF_MSG_HW, "Path %d was marked by AER\n", 10045 BP_PATH(bp)); 10046 } else { 10047 rc = true; 10048 BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n", 10049 BP_PATH(bp)); 10050 } 10051 } 10052 10053 up(&bnx2x_prev_sem); 10054 10055 return rc; 10056} 10057 10058bool bnx2x_port_after_undi(struct bnx2x *bp) 10059{ 10060 struct bnx2x_prev_path_list *entry; 10061 bool val; 10062 10063 down(&bnx2x_prev_sem); 10064 10065 entry = bnx2x_prev_path_get_entry(bp); 10066 val = !!(entry && (entry->undi & (1 << BP_PORT(bp)))); 10067 10068 up(&bnx2x_prev_sem); 10069 10070 return val; 10071} 10072 10073static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi) 10074{ 10075 struct bnx2x_prev_path_list *tmp_list; 10076 int rc; 10077 10078 rc = down_interruptible(&bnx2x_prev_sem); 10079 if (rc) { 10080 BNX2X_ERR("Received %d when tried to take lock\n", rc); 10081 return rc; 10082 } 10083 10084 /* Check whether the entry for this path already exists */ 10085 tmp_list = bnx2x_prev_path_get_entry(bp); 10086 if (tmp_list) { 10087 if (!tmp_list->aer) { 10088 BNX2X_ERR("Re-Marking the path.\n"); 10089 } else { 10090 DP(NETIF_MSG_HW, "Removing AER indication from path %d\n", 10091 BP_PATH(bp)); 10092 tmp_list->aer = 0; 10093 } 10094 up(&bnx2x_prev_sem); 10095 return 0; 10096 } 10097 up(&bnx2x_prev_sem); 10098 10099 /* Create an entry for this path and add it */ 10100 tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL); 10101 if 
/* Request a Function-Level Reset via the MCP.
 *
 * Only valid on E2+ chips with bootcode >= REQ_BC_VER_4_INITIATE_FLR;
 * returns -EINVAL otherwise. Waits for pending PCIe transactions to
 * drain (proceeding with a warning if they do not), then issues the
 * DRV_MSG_CODE_INITIATE_FLR command. Returns 0 once the command has
 * been issued.
 */
static int bnx2x_do_flr(struct bnx2x *bp)
{
	struct pci_dev *dev = bp->pdev;

	if (CHIP_IS_E1x(bp)) {
		BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
		return -EINVAL;
	}

	/* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
	if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
		BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
			  bp->common.bc_ver);
		return -EINVAL;
	}

	if (!pci_wait_for_pending_transaction(dev))
		dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");

	BNX2X_DEV_INFO("Initiating FLR\n");
	bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);

	return 0;
}
/* "Common" flavor of the previous-unload flow: this function received a
 * COMMON answer from the MCP (or the path is AER-marked) and is
 * responsible for cleaning the whole path.
 *
 * Sequence: stop MAC Rx and LLH filters, detect a previously-loaded UNDI
 * driver (normal-bell CID offset == 0x7), wait for the BRB to drain
 * (nudging the UNDI producers while it does), perform the common reset,
 * restore the saved MAC registers, mark the path as cleaned and complete
 * the MCP unload handshake. Returns 0 or a negative errno.
 */
static int bnx2x_prev_unload_common(struct bnx2x *bp)
{
	u32 reset_reg, tmp_reg = 0, rc;
	bool prev_undi = false;
	struct bnx2x_mac_vals mac_vals;

	/* It is possible a previous function received 'common' answer,
	 * but hasn't loaded yet, therefore creating a scenario of
	 * multiple functions receiving 'common' on the same path.
	 */
	BNX2X_DEV_INFO("Common unload Flow\n");

	memset(&mac_vals, 0, sizeof(mac_vals));

	/* path already cleaned - just finish the MCP handshake */
	if (bnx2x_prev_is_path_marked(bp))
		return bnx2x_prev_mcp_done(bp);

	reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);

	/* Reset should be performed after BRB is emptied */
	if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
		u32 timer_count = 1000;

		/* Close the MAC Rx to prevent BRB from filling up */
		bnx2x_prev_unload_close_mac(bp, &mac_vals);

		/* close LLH filters towards the BRB */
		bnx2x_set_rx_filter(&bp->link_params, 0);

		/* Check if the UNDI driver was previously loaded
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
			tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
			if (tmp_reg == 0x7) {
				BNX2X_DEV_INFO("UNDI previously loaded\n");
				prev_undi = true;
				/* clear the UNDI indication */
				REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
				/* clear possible idle check errors */
				REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
			}
		}
		if (!CHIP_IS_E1x(bp))
			/* block FW from writing to host */
			REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);

		/* wait until BRB is empty */
		tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
		while (timer_count) {
			u32 prev_brb = tmp_reg;

			tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
			if (!tmp_reg)
				break;

			BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg);

			/* reset timer as long as BRB actually gets emptied */
			if (prev_brb > tmp_reg)
				timer_count = 1000;
			else
				timer_count--;

			/* New UNDI FW supports MF and contains better
			 * cleaning methods - might be redundant but harmless.
			 */
			if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
				bnx2x_prev_unload_undi_mf(bp);
			} else if (prev_undi) {
				/* If UNDI resides in memory,
				 * manually increment it
				 */
				bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1);
			}
			udelay(10);
		}

		if (!timer_count)
			BNX2X_ERR("Failed to empty BRB, hope for the best\n");
	}

	/* No packets are in the pipeline, path is ready for reset */
	bnx2x_reset_common(bp);

	/* restore MAC registers saved by bnx2x_prev_unload_close_mac();
	 * an address of 0 means the register was never touched
	 */
	if (mac_vals.xmac_addr)
		REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
	if (mac_vals.umac_addr)
		REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val);
	if (mac_vals.emac_addr)
		REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
	if (mac_vals.bmac_addr) {
		REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
		REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
	}

	rc = bnx2x_prev_mark_path(bp, prev_undi);
	if (rc) {
		/* still complete the MCP handshake before bailing out */
		bnx2x_prev_mcp_done(bp);
		return rc;
	}

	return bnx2x_prev_mcp_done(bp);
}
/* Top-level previous-unload flow, run at probe time to clean up after a
 * pre-boot agent, kdump kernel or crashed driver.
 *
 * Clears a possible interrupted-DMAE error, releases stale HW/NVRAM/ALR
 * locks, then loops (up to 10 tries): lock the MCP with an unload
 * request and run either the "common" cleanup (COMMON MCP answer or
 * AER-marked path) or the "uncommon" one, retrying while the latter
 * reports BNX2X_PREV_WAIT_NEEDED. On exhaustion or failure returns
 * -EPROBE_DEFER. Also sets FEATURE_CONFIG_BOOT_FROM_SAN if this port
 * booted via UNDI.
 */
static int bnx2x_prev_unload(struct bnx2x *bp)
{
	int time_counter = 10;
	u32 rc, fw, hw_lock_reg, hw_lock_val;
	BNX2X_DEV_INFO("Entering Previous Unload Flow\n");

	/* clear hw from errors which may have resulted from an interrupted
	 * dmae transaction.
	 */
	bnx2x_prev_interrupted_dmae(bp);

	/* Release previously held locks */
	hw_lock_reg = (BP_FUNC(bp) <= 5) ?
		      (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
		      (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);

	hw_lock_val = REG_RD(bp, hw_lock_reg);
	if (hw_lock_val) {
		if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
			BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
			REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
			       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
		}

		BNX2X_DEV_INFO("Release Previously held hw lock\n");
		REG_WR(bp, hw_lock_reg, 0xffffffff);
	} else
		BNX2X_DEV_INFO("No need to release hw/nvram locks\n");

	if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
		BNX2X_DEV_INFO("Release previously held alr\n");
		bnx2x_release_alr(bp);
	}

	do {
		int aer = 0;
		/* Lock MCP using an unload request */
		fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
		if (!fw) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			break;
		}

		rc = down_interruptible(&bnx2x_prev_sem);
		if (rc) {
			BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n",
				  rc);
		} else {
			/* If Path is marked by EEH, ignore unload status */
			aer = !!(bnx2x_prev_path_get_entry(bp) &&
				 bnx2x_prev_path_get_entry(bp)->aer);
			up(&bnx2x_prev_sem);
		}

		if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
			rc = bnx2x_prev_unload_common(bp);
			break;
		}

		/* non-common reply from MCP might require looping */
		rc = bnx2x_prev_unload_uncommon(bp);
		if (rc != BNX2X_PREV_WAIT_NEEDED)
			break;

		msleep(20);
	} while (--time_counter);

	if (!time_counter || rc) {
		BNX2X_DEV_INFO("Unloading previous driver did not occur, Possibly due to MF UNDI\n");
		rc = -EPROBE_DEFER;
	}

	/* Mark function if its port was used to boot from SAN */
	if (bnx2x_port_after_undi(bp))
		bp->link_params.feature_config_flags |=
			FEATURE_CONFIG_BOOT_FROM_SAN;

	BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);

	return rc;
}
bp->link_params.feature_config_flags |= 10392 FEATURE_CONFIG_BOOT_FROM_SAN; 10393 10394 BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc); 10395 10396 return rc; 10397} 10398 10399static void bnx2x_get_common_hwinfo(struct bnx2x *bp) 10400{ 10401 u32 val, val2, val3, val4, id, boot_mode; 10402 u16 pmc; 10403 10404 /* Get the chip revision id and number. */ 10405 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ 10406 val = REG_RD(bp, MISC_REG_CHIP_NUM); 10407 id = ((val & 0xffff) << 16); 10408 val = REG_RD(bp, MISC_REG_CHIP_REV); 10409 id |= ((val & 0xf) << 12); 10410 10411 /* Metal is read from PCI regs, but we can't access >=0x400 from 10412 * the configuration space (so we need to reg_rd) 10413 */ 10414 val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3); 10415 id |= (((val >> 24) & 0xf) << 4); 10416 val = REG_RD(bp, MISC_REG_BOND_ID); 10417 id |= (val & 0xf); 10418 bp->common.chip_id = id; 10419 10420 /* force 57811 according to MISC register */ 10421 if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) { 10422 if (CHIP_IS_57810(bp)) 10423 bp->common.chip_id = (CHIP_NUM_57811 << 16) | 10424 (bp->common.chip_id & 0x0000FFFF); 10425 else if (CHIP_IS_57810_MF(bp)) 10426 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) | 10427 (bp->common.chip_id & 0x0000FFFF); 10428 bp->common.chip_id |= 0x1; 10429 } 10430 10431 /* Set doorbell size */ 10432 bp->db_size = (1 << BNX2X_DB_SHIFT); 10433 10434 if (!CHIP_IS_E1x(bp)) { 10435 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR); 10436 if ((val & 1) == 0) 10437 val = REG_RD(bp, MISC_REG_PORT4MODE_EN); 10438 else 10439 val = (val >> 1) & 1; 10440 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" : 10441 "2_PORT_MODE"); 10442 bp->common.chip_port_mode = val ? 
CHIP_4_PORT_MODE : 10443 CHIP_2_PORT_MODE; 10444 10445 if (CHIP_MODE_IS_4_PORT(bp)) 10446 bp->pfid = (bp->pf_num >> 1); /* 0..3 */ 10447 else 10448 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */ 10449 } else { 10450 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */ 10451 bp->pfid = bp->pf_num; /* 0..7 */ 10452 } 10453 10454 BNX2X_DEV_INFO("pf_id: %x", bp->pfid); 10455 10456 bp->link_params.chip_id = bp->common.chip_id; 10457 BNX2X_DEV_INFO("chip ID is 0x%x\n", id); 10458 10459 val = (REG_RD(bp, 0x2874) & 0x55); 10460 if ((bp->common.chip_id & 0x1) || 10461 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) { 10462 bp->flags |= ONE_PORT_FLAG; 10463 BNX2X_DEV_INFO("single port device\n"); 10464 } 10465 10466 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4); 10467 bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE << 10468 (val & MCPR_NVM_CFG4_FLASH_SIZE)); 10469 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n", 10470 bp->common.flash_size, bp->common.flash_size); 10471 10472 bnx2x_init_shmem(bp); 10473 10474 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ? 
10475 MISC_REG_GENERIC_CR_1 : 10476 MISC_REG_GENERIC_CR_0)); 10477 10478 bp->link_params.shmem_base = bp->common.shmem_base; 10479 bp->link_params.shmem2_base = bp->common.shmem2_base; 10480 if (SHMEM2_RD(bp, size) > 10481 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)])) 10482 bp->link_params.lfa_base = 10483 REG_RD(bp, bp->common.shmem2_base + 10484 (u32)offsetof(struct shmem2_region, 10485 lfa_host_addr[BP_PORT(bp)])); 10486 else 10487 bp->link_params.lfa_base = 0; 10488 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n", 10489 bp->common.shmem_base, bp->common.shmem2_base); 10490 10491 if (!bp->common.shmem_base) { 10492 BNX2X_DEV_INFO("MCP not active\n"); 10493 bp->flags |= NO_MCP_FLAG; 10494 return; 10495 } 10496 10497 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); 10498 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config); 10499 10500 bp->link_params.hw_led_mode = ((bp->common.hw_config & 10501 SHARED_HW_CFG_LED_MODE_MASK) >> 10502 SHARED_HW_CFG_LED_MODE_SHIFT); 10503 10504 bp->link_params.feature_config_flags = 0; 10505 val = SHMEM_RD(bp, dev_info.shared_feature_config.config); 10506 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) 10507 bp->link_params.feature_config_flags |= 10508 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 10509 else 10510 bp->link_params.feature_config_flags &= 10511 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; 10512 10513 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8; 10514 bp->common.bc_ver = val; 10515 BNX2X_DEV_INFO("bc_ver %X\n", val); 10516 if (val < BNX2X_BC_VER) { 10517 /* for now only warn 10518 * later we might need to enforce this */ 10519 BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n", 10520 BNX2X_BC_VER, val); 10521 } 10522 bp->link_params.feature_config_flags |= 10523 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ? 
10524 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0; 10525 10526 bp->link_params.feature_config_flags |= 10527 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ? 10528 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0; 10529 bp->link_params.feature_config_flags |= 10530 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ? 10531 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0; 10532 bp->link_params.feature_config_flags |= 10533 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ? 10534 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0; 10535 10536 bp->link_params.feature_config_flags |= 10537 (val >= REQ_BC_VER_4_MT_SUPPORTED) ? 10538 FEATURE_CONFIG_MT_SUPPORT : 0; 10539 10540 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ? 10541 BC_SUPPORTS_PFC_STATS : 0; 10542 10543 bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ? 10544 BC_SUPPORTS_FCOE_FEATURES : 0; 10545 10546 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ? 10547 BC_SUPPORTS_DCBX_MSG_NON_PMF : 0; 10548 10549 bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ? 10550 BC_SUPPORTS_RMMOD_CMD : 0; 10551 10552 boot_mode = SHMEM_RD(bp, 10553 dev_info.port_feature_config[BP_PORT(bp)].mba_config) & 10554 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; 10555 switch (boot_mode) { 10556 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE: 10557 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE; 10558 break; 10559 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB: 10560 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI; 10561 break; 10562 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT: 10563 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE; 10564 break; 10565 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE: 10566 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE; 10567 break; 10568 } 10569 10570 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc); 10571 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; 10572 10573 BNX2X_DEV_INFO("%sWoL capable\n", 10574 (bp->flags & NO_WOL_FLAG) ? 
#define IGU_FID(val)	GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
#define IGU_VEC(val)	GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)

/* Discover this PF's IGU status-block layout.
 *
 * In backward-compatible interrupt mode the SB ids are derived
 * arithmetically from the function/VN number; in normal mode the IGU CAM
 * is scanned for valid entries that encode this PF.  On success sets
 * bp->igu_dsb_id (default SB), bp->igu_base_sb (first non-default SB) and,
 * on CONFIG_PCI_MSI builds in normal mode, trims bp->igu_sb_cnt to the
 * number of CAM entries actually found.
 *
 * Returns 0 on success, -EINVAL if no CAM entry maps to this PF.
 */
static int bnx2x_get_igu_cam_info(struct bnx2x *bp)
{
	int pfid = BP_FUNC(bp);
	int igu_sb_id;
	u32 val;
	u8 fid, igu_sb_cnt = 0;

	bp->igu_base_sb = 0xff;	/* "not yet found" marker */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		int vn = BP_VN(bp);

		/* Backward-compatible mode: fixed layout, no CAM scan.
		 * 4-port chips stride by PF id, otherwise by VN.
		 */
		igu_sb_cnt = bp->igu_sb_cnt;
		bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
			FP_SB_MAX_E1x;

		bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
			(CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);

		return 0;
	}

	/* IGU in normal mode - read CAM */
	for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
	     igu_sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = IGU_FID(val);
		if ((fid & IGU_FID_ENCODE_IS_PF)) {
			/* only PF entries belonging to this PF count */
			if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
				continue;
			if (IGU_VEC(val) == 0)
				/* default status block */
				bp->igu_dsb_id = igu_sb_id;
			else {
				if (bp->igu_base_sb == 0xff)
					bp->igu_base_sb = igu_sb_id;
				igu_sb_cnt++;
			}
		}
	}

#ifdef CONFIG_PCI_MSI
	/* Due to new PF resource allocation by MFW T7.4 and above, it's
	 * optional that number of CAM entries will not be equal to the value
	 * advertised in PCI.
	 * Driver should use the minimal value of both as the actual status
	 * block count
	 */
	bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt);
#endif

	if (igu_sb_cnt == 0) {
		BNX2X_ERR("CAM configuration error\n");
		return -EINVAL;
	}

	return 0;
}
PHY1 config 0x%x, PHY2 config 0x%x\n", 10682 SHMEM_RD(bp, 10683 dev_info.port_hw_config[port].external_phy_config), 10684 SHMEM_RD(bp, 10685 dev_info.port_hw_config[port].external_phy_config2)); 10686 return; 10687 } 10688 10689 if (CHIP_IS_E3(bp)) 10690 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR); 10691 else { 10692 switch (switch_cfg) { 10693 case SWITCH_CFG_1G: 10694 bp->port.phy_addr = REG_RD( 10695 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10); 10696 break; 10697 case SWITCH_CFG_10G: 10698 bp->port.phy_addr = REG_RD( 10699 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18); 10700 break; 10701 default: 10702 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n", 10703 bp->port.link_config[0]); 10704 return; 10705 } 10706 } 10707 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); 10708 /* mask what we support according to speed_cap_mask per configuration */ 10709 for (idx = 0; idx < cfg_size; idx++) { 10710 if (!(bp->link_params.speed_cap_mask[idx] & 10711 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) 10712 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half; 10713 10714 if (!(bp->link_params.speed_cap_mask[idx] & 10715 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) 10716 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full; 10717 10718 if (!(bp->link_params.speed_cap_mask[idx] & 10719 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) 10720 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half; 10721 10722 if (!(bp->link_params.speed_cap_mask[idx] & 10723 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) 10724 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full; 10725 10726 if (!(bp->link_params.speed_cap_mask[idx] & 10727 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) 10728 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half | 10729 SUPPORTED_1000baseT_Full); 10730 10731 if (!(bp->link_params.speed_cap_mask[idx] & 10732 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) 10733 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full; 10734 10735 if (!(bp->link_params.speed_cap_mask[idx] & 10736 
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) 10737 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full; 10738 10739 if (!(bp->link_params.speed_cap_mask[idx] & 10740 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) 10741 bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full; 10742 } 10743 10744 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0], 10745 bp->port.supported[1]); 10746} 10747 10748static void bnx2x_link_settings_requested(struct bnx2x *bp) 10749{ 10750 u32 link_config, idx, cfg_size = 0; 10751 bp->port.advertising[0] = 0; 10752 bp->port.advertising[1] = 0; 10753 switch (bp->link_params.num_phys) { 10754 case 1: 10755 case 2: 10756 cfg_size = 1; 10757 break; 10758 case 3: 10759 cfg_size = 2; 10760 break; 10761 } 10762 for (idx = 0; idx < cfg_size; idx++) { 10763 bp->link_params.req_duplex[idx] = DUPLEX_FULL; 10764 link_config = bp->port.link_config[idx]; 10765 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { 10766 case PORT_FEATURE_LINK_SPEED_AUTO: 10767 if (bp->port.supported[idx] & SUPPORTED_Autoneg) { 10768 bp->link_params.req_line_speed[idx] = 10769 SPEED_AUTO_NEG; 10770 bp->port.advertising[idx] |= 10771 bp->port.supported[idx]; 10772 if (bp->link_params.phy[EXT_PHY1].type == 10773 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) 10774 bp->port.advertising[idx] |= 10775 (SUPPORTED_100baseT_Half | 10776 SUPPORTED_100baseT_Full); 10777 } else { 10778 /* force 10G, no AN */ 10779 bp->link_params.req_line_speed[idx] = 10780 SPEED_10000; 10781 bp->port.advertising[idx] |= 10782 (ADVERTISED_10000baseT_Full | 10783 ADVERTISED_FIBRE); 10784 continue; 10785 } 10786 break; 10787 10788 case PORT_FEATURE_LINK_SPEED_10M_FULL: 10789 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) { 10790 bp->link_params.req_line_speed[idx] = 10791 SPEED_10; 10792 bp->port.advertising[idx] |= 10793 (ADVERTISED_10baseT_Full | 10794 ADVERTISED_TP); 10795 } else { 10796 BNX2X_ERR("NVRAM config error. 
Invalid link_config 0x%x speed_cap_mask 0x%x\n", 10797 link_config, 10798 bp->link_params.speed_cap_mask[idx]); 10799 return; 10800 } 10801 break; 10802 10803 case PORT_FEATURE_LINK_SPEED_10M_HALF: 10804 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) { 10805 bp->link_params.req_line_speed[idx] = 10806 SPEED_10; 10807 bp->link_params.req_duplex[idx] = 10808 DUPLEX_HALF; 10809 bp->port.advertising[idx] |= 10810 (ADVERTISED_10baseT_Half | 10811 ADVERTISED_TP); 10812 } else { 10813 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 10814 link_config, 10815 bp->link_params.speed_cap_mask[idx]); 10816 return; 10817 } 10818 break; 10819 10820 case PORT_FEATURE_LINK_SPEED_100M_FULL: 10821 if (bp->port.supported[idx] & 10822 SUPPORTED_100baseT_Full) { 10823 bp->link_params.req_line_speed[idx] = 10824 SPEED_100; 10825 bp->port.advertising[idx] |= 10826 (ADVERTISED_100baseT_Full | 10827 ADVERTISED_TP); 10828 } else { 10829 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 10830 link_config, 10831 bp->link_params.speed_cap_mask[idx]); 10832 return; 10833 } 10834 break; 10835 10836 case PORT_FEATURE_LINK_SPEED_100M_HALF: 10837 if (bp->port.supported[idx] & 10838 SUPPORTED_100baseT_Half) { 10839 bp->link_params.req_line_speed[idx] = 10840 SPEED_100; 10841 bp->link_params.req_duplex[idx] = 10842 DUPLEX_HALF; 10843 bp->port.advertising[idx] |= 10844 (ADVERTISED_100baseT_Half | 10845 ADVERTISED_TP); 10846 } else { 10847 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 10848 link_config, 10849 bp->link_params.speed_cap_mask[idx]); 10850 return; 10851 } 10852 break; 10853 10854 case PORT_FEATURE_LINK_SPEED_1G: 10855 if (bp->port.supported[idx] & 10856 SUPPORTED_1000baseT_Full) { 10857 bp->link_params.req_line_speed[idx] = 10858 SPEED_1000; 10859 bp->port.advertising[idx] |= 10860 (ADVERTISED_1000baseT_Full | 10861 ADVERTISED_TP); 10862 } else { 10863 BNX2X_ERR("NVRAM config error. 
Invalid link_config 0x%x speed_cap_mask 0x%x\n", 10864 link_config, 10865 bp->link_params.speed_cap_mask[idx]); 10866 return; 10867 } 10868 break; 10869 10870 case PORT_FEATURE_LINK_SPEED_2_5G: 10871 if (bp->port.supported[idx] & 10872 SUPPORTED_2500baseX_Full) { 10873 bp->link_params.req_line_speed[idx] = 10874 SPEED_2500; 10875 bp->port.advertising[idx] |= 10876 (ADVERTISED_2500baseX_Full | 10877 ADVERTISED_TP); 10878 } else { 10879 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 10880 link_config, 10881 bp->link_params.speed_cap_mask[idx]); 10882 return; 10883 } 10884 break; 10885 10886 case PORT_FEATURE_LINK_SPEED_10G_CX4: 10887 if (bp->port.supported[idx] & 10888 SUPPORTED_10000baseT_Full) { 10889 bp->link_params.req_line_speed[idx] = 10890 SPEED_10000; 10891 bp->port.advertising[idx] |= 10892 (ADVERTISED_10000baseT_Full | 10893 ADVERTISED_FIBRE); 10894 } else { 10895 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", 10896 link_config, 10897 bp->link_params.speed_cap_mask[idx]); 10898 return; 10899 } 10900 break; 10901 case PORT_FEATURE_LINK_SPEED_20G: 10902 bp->link_params.req_line_speed[idx] = SPEED_20000; 10903 10904 break; 10905 default: 10906 BNX2X_ERR("NVRAM config error. 
BAD link speed link_config 0x%x\n", 10907 link_config); 10908 bp->link_params.req_line_speed[idx] = 10909 SPEED_AUTO_NEG; 10910 bp->port.advertising[idx] = 10911 bp->port.supported[idx]; 10912 break; 10913 } 10914 10915 bp->link_params.req_flow_ctrl[idx] = (link_config & 10916 PORT_FEATURE_FLOW_CONTROL_MASK); 10917 if (bp->link_params.req_flow_ctrl[idx] == 10918 BNX2X_FLOW_CTRL_AUTO) { 10919 if (!(bp->port.supported[idx] & SUPPORTED_Autoneg)) 10920 bp->link_params.req_flow_ctrl[idx] = 10921 BNX2X_FLOW_CTRL_NONE; 10922 else 10923 bnx2x_set_requested_fc(bp); 10924 } 10925 10926 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n", 10927 bp->link_params.req_line_speed[idx], 10928 bp->link_params.req_duplex[idx], 10929 bp->link_params.req_flow_ctrl[idx], 10930 bp->port.advertising[idx]); 10931 } 10932} 10933 10934static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi) 10935{ 10936 __be16 mac_hi_be = cpu_to_be16(mac_hi); 10937 __be32 mac_lo_be = cpu_to_be32(mac_lo); 10938 memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be)); 10939 memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be)); 10940} 10941 10942static void bnx2x_get_port_hwinfo(struct bnx2x *bp) 10943{ 10944 int port = BP_PORT(bp); 10945 u32 config; 10946 u32 ext_phy_type, ext_phy_config, eee_mode; 10947 10948 bp->link_params.bp = bp; 10949 bp->link_params.port = port; 10950 10951 bp->link_params.lane_config = 10952 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config); 10953 10954 bp->link_params.speed_cap_mask[0] = 10955 SHMEM_RD(bp, 10956 dev_info.port_hw_config[port].speed_capability_mask) & 10957 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK; 10958 bp->link_params.speed_cap_mask[1] = 10959 SHMEM_RD(bp, 10960 dev_info.port_hw_config[port].speed_capability_mask2) & 10961 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK; 10962 bp->port.link_config[0] = 10963 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config); 10964 10965 bp->port.link_config[1] = 10966 SHMEM_RD(bp, 
dev_info.port_feature_config[port].link_config2); 10967 10968 bp->link_params.multi_phy_config = 10969 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config); 10970 /* If the device is capable of WoL, set the default state according 10971 * to the HW 10972 */ 10973 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config); 10974 bp->wol = (!(bp->flags & NO_WOL_FLAG) && 10975 (config & PORT_FEATURE_WOL_ENABLED)); 10976 10977 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == 10978 PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp)) 10979 bp->flags |= NO_ISCSI_FLAG; 10980 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == 10981 PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp))) 10982 bp->flags |= NO_FCOE_FLAG; 10983 10984 BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n", 10985 bp->link_params.lane_config, 10986 bp->link_params.speed_cap_mask[0], 10987 bp->port.link_config[0]); 10988 10989 bp->link_params.switch_cfg = (bp->port.link_config[0] & 10990 PORT_FEATURE_CONNECTED_SWITCH_MASK); 10991 bnx2x_phy_probe(&bp->link_params); 10992 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg); 10993 10994 bnx2x_link_settings_requested(bp); 10995 10996 /* 10997 * If connected directly, work with the internal PHY, otherwise, work 10998 * with the external PHY 10999 */ 11000 ext_phy_config = 11001 SHMEM_RD(bp, 11002 dev_info.port_hw_config[port].external_phy_config); 11003 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config); 11004 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) 11005 bp->mdio.prtad = bp->port.phy_addr; 11006 11007 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) && 11008 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) 11009 bp->mdio.prtad = 11010 XGXS_EXT_PHY_ADDR(ext_phy_config); 11011 11012 /* Configure link feature according to nvram value */ 11013 eee_mode = (((SHMEM_RD(bp, dev_info. 
/* Read the iSCSI license information from shmem.
 *
 * Decodes the per-port maximum iSCSI connection count (the license value
 * is XOR-obfuscated with FW_ENCODE_32BIT_PATTERN) into
 * bp->cnic_eth_dev.max_iscsi_conn.  If CNIC is not supported, or the
 * licensed connection count is zero, NO_ISCSI_FLAG is set in bp->flags.
 */
void bnx2x_get_iscsi_info(struct bnx2x *bp)
{
	u32 no_flags = NO_ISCSI_FLAG;
	int port = BP_PORT(bp);
	u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
				drv_lic_key[port].max_iscsi_conn);

	if (!CNIC_SUPPORT(bp)) {
		bp->flags |= no_flags;
		return;
	}

	/* Get the number of maximum allowed iSCSI connections */
	bp->cnic_eth_dev.max_iscsi_conn =
		(max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
		BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;

	BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
		       bp->cnic_eth_dev.max_iscsi_conn);

	/*
	 * If maximum allowed number of connections is zero -
	 * disable the feature.
	 */
	if (!bp->cnic_eth_dev.max_iscsi_conn)
		bp->flags |= no_flags;
}

/* Copy the FCoE WWN port/node names for function @func from the MF
 * (multi-function) external configuration into bp->cnic_eth_dev.
 */
static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
{
	/* Port info */
	bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
		MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper);
	bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
		MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower);

	/* Node info */
	bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
		MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper);
	bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
		MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
}
/* Read the FCoE license and WWN information from shmem / MF config.
 *
 * Decodes the per-port maximum FCoE connection count (license value is
 * XOR-obfuscated with FW_ENCODE_32BIT_PATTERN), computes the per-function
 * FCoE exchange budget (engine total divided by the number of functions
 * that share FCoE resources), and reads the WWN port/node names from the
 * source appropriate to the current mode (SF port config, MF-SI/AFEX or
 * MF-SD external config).  Sets NO_FCOE_FLAG when CNIC is unsupported or
 * the licensed connection count is zero.
 */
static void bnx2x_get_fcoe_info(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_ABS_FUNC(bp);
	u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
				drv_lic_key[port].max_fcoe_conn);
	u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp);

	if (!CNIC_SUPPORT(bp)) {
		bp->flags |= NO_FCOE_FLAG;
		return;
	}

	/* Get the number of maximum allowed FCoE connections */
	bp->cnic_eth_dev.max_fcoe_conn =
		(max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
		BNX2X_MAX_FCOE_INIT_CONN_SHIFT;

	/* Calculate the number of maximum allowed FCoE tasks */
	bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;

	/* check if FCoE resources must be shared between different functions */
	if (num_fcoe_func)
		bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func;

	/* Read the WWN: */
	if (!IS_MF(bp)) {
		/* Port info */
		bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
			SHMEM_RD(bp,
				 dev_info.port_hw_config[port].
				 fcoe_wwn_port_name_upper);
		bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
			SHMEM_RD(bp,
				 dev_info.port_hw_config[port].
				 fcoe_wwn_port_name_lower);

		/* Node info */
		bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
			SHMEM_RD(bp,
				 dev_info.port_hw_config[port].
				 fcoe_wwn_node_name_upper);
		bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
			SHMEM_RD(bp,
				 dev_info.port_hw_config[port].
				 fcoe_wwn_node_name_lower);
	} else if (!IS_MF_SD(bp)) {
		/*
		 * Read the WWN info only if the FCoE feature is enabled for
		 * this function.
		 */
		if (BNX2X_MF_EXT_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
			bnx2x_get_ext_wwn_info(bp, func);

	} else if (IS_MF_FCOE_SD(bp) && !CHIP_IS_E1x(bp)) {
		bnx2x_get_ext_wwn_info(bp, func);
	}

	BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);

	/*
	 * If maximum allowed number of connections is zero -
	 * disable the feature.
	 */
	if (!bp->cnic_eth_dev.max_fcoe_conn)
		bp->flags |= NO_FCOE_FLAG;
}

/* Gather CNIC (iSCSI + FCoE) license/WWN information. */
static void bnx2x_get_cnic_info(struct bnx2x *bp)
{
	/*
	 * iSCSI may be dynamically disabled but reading
	 * info here we will decrease memory usage by driver
	 * if the feature is disabled for good
	 */
	bnx2x_get_iscsi_info(bp);
	bnx2x_get_fcoe_info(bp);
}
11220 fcoe_mac_addr_upper); 11221 val = MF_CFG_RD(bp, func_ext_config[func]. 11222 fcoe_mac_addr_lower); 11223 bnx2x_set_mac_buf(fip_mac, val, val2); 11224 BNX2X_DEV_INFO 11225 ("Read FCoE L2 MAC: %pM\n", fip_mac); 11226 } else { 11227 bp->flags |= NO_FCOE_FLAG; 11228 } 11229 11230 bp->mf_ext_config = cfg; 11231 11232 } else { /* SD MODE */ 11233 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) { 11234 /* use primary mac as iscsi mac */ 11235 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN); 11236 11237 BNX2X_DEV_INFO("SD ISCSI MODE\n"); 11238 BNX2X_DEV_INFO 11239 ("Read iSCSI MAC: %pM\n", iscsi_mac); 11240 } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) { 11241 /* use primary mac as fip mac */ 11242 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN); 11243 BNX2X_DEV_INFO("SD FCoE MODE\n"); 11244 BNX2X_DEV_INFO 11245 ("Read FIP MAC: %pM\n", fip_mac); 11246 } 11247 } 11248 11249 /* If this is a storage-only interface, use SAN mac as 11250 * primary MAC. Notice that for SD this is already the case, 11251 * as the SAN mac was copied from the primary MAC. 11252 */ 11253 if (IS_MF_FCOE_AFEX(bp)) 11254 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN); 11255 } else { 11256 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. 11257 iscsi_mac_upper); 11258 val = SHMEM_RD(bp, dev_info.port_hw_config[port]. 11259 iscsi_mac_lower); 11260 bnx2x_set_mac_buf(iscsi_mac, val, val2); 11261 11262 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. 11263 fcoe_fip_mac_upper); 11264 val = SHMEM_RD(bp, dev_info.port_hw_config[port]. 11265 fcoe_fip_mac_lower); 11266 bnx2x_set_mac_buf(fip_mac, val, val2); 11267 } 11268 11269 /* Disable iSCSI OOO if MAC configuration is invalid. */ 11270 if (!is_valid_ether_addr(iscsi_mac)) { 11271 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; 11272 memset(iscsi_mac, 0, ETH_ALEN); 11273 } 11274 11275 /* Disable FCoE if MAC configuration is invalid. 
*/ 11276 if (!is_valid_ether_addr(fip_mac)) { 11277 bp->flags |= NO_FCOE_FLAG; 11278 memset(bp->fip_mac, 0, ETH_ALEN); 11279 } 11280} 11281 11282static void bnx2x_get_mac_hwinfo(struct bnx2x *bp) 11283{ 11284 u32 val, val2; 11285 int func = BP_ABS_FUNC(bp); 11286 int port = BP_PORT(bp); 11287 11288 /* Zero primary MAC configuration */ 11289 memset(bp->dev->dev_addr, 0, ETH_ALEN); 11290 11291 if (BP_NOMCP(bp)) { 11292 BNX2X_ERROR("warning: random MAC workaround active\n"); 11293 eth_hw_addr_random(bp->dev); 11294 } else if (IS_MF(bp)) { 11295 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper); 11296 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower); 11297 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) && 11298 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) 11299 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); 11300 11301 if (CNIC_SUPPORT(bp)) 11302 bnx2x_get_cnic_mac_hwinfo(bp); 11303 } else { 11304 /* in SF read MACs from port configuration */ 11305 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); 11306 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); 11307 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); 11308 11309 if (CNIC_SUPPORT(bp)) 11310 bnx2x_get_cnic_mac_hwinfo(bp); 11311 } 11312 11313 if (!BP_NOMCP(bp)) { 11314 /* Read physical port identifier from shmem */ 11315 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); 11316 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); 11317 bnx2x_set_mac_buf(bp->phys_port_id, val, val2); 11318 bp->flags |= HAS_PHYS_PORT_ID; 11319 } 11320 11321 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); 11322 11323 if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr)) 11324 dev_err(&bp->pdev->dev, 11325 "bad Ethernet MAC address configuration: %pM\n" 11326 "change it manually before bringing up the appropriate network interface\n", 11327 bp->dev->dev_addr); 11328} 11329 11330static bool bnx2x_get_dropless_info(struct bnx2x *bp) 11331{ 11332 int tmp; 11333 u32 cfg; 11334 11335 if 
/* Top-level hardware-information discovery for this function.
 *
 * Sequence: common shmem info, IGU interrupt-block configuration (forcing
 * the IGU out of backward-compatible mode on non-E1x chips, under the
 * RESET hardware lock), base FW status-block id, multi-function (MF)
 * mode/OV configuration, and finally port, MAC and CNIC info.
 *
 * Returns 0 on success, -EPERM on fatal configuration problems (IGU
 * cannot be forced to normal mode, missing MF OV, or a VN seen while in
 * single-function mode), or the error from bnx2x_get_igu_cam_info().
 */
static int bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int /*abs*/func = BP_ABS_FUNC(bp);
	int vn;
	u32 val = 0;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	/*
	 * initialize IGU parameters
	 */
	if (CHIP_IS_E1x(bp)) {
		bp->common.int_block = INT_BLOCK_HC;

		bp->igu_dsb_id = DEF_SB_IGU_ID;
		bp->igu_base_sb = 0;
	} else {
		bp->common.int_block = INT_BLOCK_IGU;

		/* do not allow device reset during IGU info processing */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);

		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);

		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
			int tout = 5000;

			BNX2X_DEV_INFO("FORCING Normal Mode\n");

			val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
			REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
			REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);

			/* poll up to ~5-10s for the IGU memory reset to
			 * complete
			 */
			while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
				tout--;
				usleep_range(1000, 2000);
			}

			if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
				dev_err(&bp->pdev->dev,
					"FORCING Normal Mode failed!!!\n");
				bnx2x_release_hw_lock(bp,
						      HW_LOCK_RESOURCE_RESET);
				return -EPERM;
			}
		}

		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
			BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
			bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
		} else
			BNX2X_DEV_INFO("IGU Normal Mode\n");

		rc = bnx2x_get_igu_cam_info(bp);
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
		if (rc)
			return rc;
	}

	/*
	 * set base FW non-default (fast path) status block id, this value is
	 * used to initialize the fw_sb_id saved on the fp/queue structure to
	 * determine the id used by the FW.
	 */
	if (CHIP_IS_E1x(bp))
		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
	else /*
	      * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of
	      * the same queue are indicated on the same IGU SB). So we prefer
	      * FW and IGU SBs to be the same value.
	      */
		bp->base_fw_ndsb = bp->igu_base_sb;

	BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n"
		       "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
		       bp->igu_sb_cnt, bp->base_fw_ndsb);

	/*
	 * Initialize MF configuration
	 */

	bp->mf_ov = 0;
	bp->mf_mode = 0;
	vn = BP_VN(bp);

	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
		BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
			       bp->common.shmem2_base, SHMEM2_RD(bp, size),
			       (u32)offsetof(struct shmem2_region, mf_cfg_addr));

		if (SHMEM2_HAS(bp, mf_cfg_addr))
			bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
		else
			/* older shmem2 layouts: MF config lives right after
			 * the function mailboxes in shmem
			 */
			bp->common.mf_cfg_base = bp->common.shmem_base +
				offsetof(struct shmem_region, func_mb) +
				E1H_FUNC_MAX * sizeof(struct drv_func_mb);
		/*
		 * get mf configuration:
		 * 1. Existence of MF configuration
		 * 2. MAC address must be legal (check only upper bytes)
		 *    for  Switch-Independent mode;
		 *    OVLAN must be legal for Switch-Dependent mode
		 * 3. SF_MODE configures specific MF mode
		 */
		if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
			/* get mf configuration */
			val = SHMEM_RD(bp,
				       dev_info.shared_feature_config.config);
			val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;

			switch (val) {
			case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
				val = MF_CFG_RD(bp, func_mf_config[func].
						mac_upper);
				/* check for legal mac (upper bytes)*/
				if (val != 0xffff) {
					bp->mf_mode = MULTI_FUNCTION_SI;
					bp->mf_config[vn] = MF_CFG_RD(bp,
						   func_mf_config[func].config);
				} else
					BNX2X_DEV_INFO("illegal MAC address for SI\n");
				break;
			case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
				if ((!CHIP_IS_E1x(bp)) &&
				    (MF_CFG_RD(bp, func_mf_config[func].
					       mac_upper) != 0xffff) &&
				    (SHMEM2_HAS(bp,
						afex_driver_support))) {
					bp->mf_mode = MULTI_FUNCTION_AFEX;
					bp->mf_config[vn] = MF_CFG_RD(bp,
						func_mf_config[func].config);
				} else {
					BNX2X_DEV_INFO("can not configure afex mode\n");
				}
				break;
			case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
				/* get OV configuration */
				val = MF_CFG_RD(bp,
					func_mf_config[FUNC_0].e1hov_tag);
				val &= FUNC_MF_CFG_E1HOV_TAG_MASK;

				if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
					bp->mf_mode = MULTI_FUNCTION_SD;
					bp->mf_config[vn] = MF_CFG_RD(bp,
						func_mf_config[func].config);
				} else
					BNX2X_DEV_INFO("illegal OV for SD\n");
				break;
			case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
				bp->mf_config[vn] = 0;
				break;
			default:
				/* Unknown configuration: reset mf_config */
				bp->mf_config[vn] = 0;
				BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
			}
		}

		BNX2X_DEV_INFO("%s function mode\n",
			       IS_MF(bp) ? "multi" : "single");

		switch (bp->mf_mode) {
		case MULTI_FUNCTION_SD:
			val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
			      FUNC_MF_CFG_E1HOV_TAG_MASK;
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->mf_ov = val;
				bp->path_has_ovlan = true;

				BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
					       func, bp->mf_ov, bp->mf_ov);
			} else {
				dev_err(&bp->pdev->dev,
					"No valid MF OV for func %d, aborting\n",
					func);
				return -EPERM;
			}
			break;
		case MULTI_FUNCTION_AFEX:
			BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
			break;
		case MULTI_FUNCTION_SI:
			BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
				       func);
			break;
		default:
			if (vn) {
				dev_err(&bp->pdev->dev,
					"VN %d is in a single function mode, aborting\n",
					vn);
				return -EPERM;
			}
			break;
		}

		/* check if other port on the path needs ovlan:
		 * Since MF configuration is shared between ports
		 * Possible mixed modes are only
		 * {SF, SI} {SF, SD} {SD, SF} {SI, SF}
		 */
		if (CHIP_MODE_IS_4_PORT(bp) &&
		    !bp->path_has_ovlan &&
		    !IS_MF(bp) &&
		    bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
			u8 other_port = !BP_PORT(bp);
			u8 other_func = BP_PATH(bp) + 2*other_port;
			val = MF_CFG_RD(bp,
					func_mf_config[other_func].e1hov_tag);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
				bp->path_has_ovlan = true;
		}
	}

	/* adjust igu_sb_cnt to MF for E1H */
	if (CHIP_IS_E1H(bp) && IS_MF(bp))
		bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);

	/* port info */
	bnx2x_get_port_hwinfo(bp);

	/* Get MAC addresses */
	bnx2x_get_mac_hwinfo(bp);

	bnx2x_get_cnic_info(bp);

	return rc;
}
rodi; 11585 char vpd_start[BNX2X_VPD_LEN+1]; 11586 char str_id_reg[VENDOR_ID_LEN+1]; 11587 char str_id_cap[VENDOR_ID_LEN+1]; 11588 char *vpd_data; 11589 char *vpd_extended_data = NULL; 11590 u8 len; 11591 11592 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start); 11593 memset(bp->fw_ver, 0, sizeof(bp->fw_ver)); 11594 11595 if (cnt < BNX2X_VPD_LEN) 11596 goto out_not_found; 11597 11598 /* VPD RO tag should be first tag after identifier string, hence 11599 * we should be able to find it in first BNX2X_VPD_LEN chars 11600 */ 11601 i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN, 11602 PCI_VPD_LRDT_RO_DATA); 11603 if (i < 0) 11604 goto out_not_found; 11605 11606 block_end = i + PCI_VPD_LRDT_TAG_SIZE + 11607 pci_vpd_lrdt_size(&vpd_start[i]); 11608 11609 i += PCI_VPD_LRDT_TAG_SIZE; 11610 11611 if (block_end > BNX2X_VPD_LEN) { 11612 vpd_extended_data = kmalloc(block_end, GFP_KERNEL); 11613 if (vpd_extended_data == NULL) 11614 goto out_not_found; 11615 11616 /* read rest of vpd image into vpd_extended_data */ 11617 memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN); 11618 cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN, 11619 block_end - BNX2X_VPD_LEN, 11620 vpd_extended_data + BNX2X_VPD_LEN); 11621 if (cnt < (block_end - BNX2X_VPD_LEN)) 11622 goto out_not_found; 11623 vpd_data = vpd_extended_data; 11624 } else 11625 vpd_data = vpd_start; 11626 11627 /* now vpd_data holds full vpd content in both cases */ 11628 11629 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, 11630 PCI_VPD_RO_KEYWORD_MFR_ID); 11631 if (rodi < 0) 11632 goto out_not_found; 11633 11634 len = pci_vpd_info_field_size(&vpd_data[rodi]); 11635 11636 if (len != VENDOR_ID_LEN) 11637 goto out_not_found; 11638 11639 rodi += PCI_VPD_INFO_FLD_HDR_SIZE; 11640 11641 /* vendor specific info */ 11642 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL); 11643 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL); 11644 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) || 
11645 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) { 11646 11647 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, 11648 PCI_VPD_RO_KEYWORD_VENDOR0); 11649 if (rodi >= 0) { 11650 len = pci_vpd_info_field_size(&vpd_data[rodi]); 11651 11652 rodi += PCI_VPD_INFO_FLD_HDR_SIZE; 11653 11654 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) { 11655 memcpy(bp->fw_ver, &vpd_data[rodi], len); 11656 bp->fw_ver[len] = ' '; 11657 } 11658 } 11659 kfree(vpd_extended_data); 11660 return; 11661 } 11662out_not_found: 11663 kfree(vpd_extended_data); 11664 return; 11665} 11666 11667static void bnx2x_set_modes_bitmap(struct bnx2x *bp) 11668{ 11669 u32 flags = 0; 11670 11671 if (CHIP_REV_IS_FPGA(bp)) 11672 SET_FLAGS(flags, MODE_FPGA); 11673 else if (CHIP_REV_IS_EMUL(bp)) 11674 SET_FLAGS(flags, MODE_EMUL); 11675 else 11676 SET_FLAGS(flags, MODE_ASIC); 11677 11678 if (CHIP_MODE_IS_4_PORT(bp)) 11679 SET_FLAGS(flags, MODE_PORT4); 11680 else 11681 SET_FLAGS(flags, MODE_PORT2); 11682 11683 if (CHIP_IS_E2(bp)) 11684 SET_FLAGS(flags, MODE_E2); 11685 else if (CHIP_IS_E3(bp)) { 11686 SET_FLAGS(flags, MODE_E3); 11687 if (CHIP_REV(bp) == CHIP_REV_Ax) 11688 SET_FLAGS(flags, MODE_E3_A0); 11689 else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/ 11690 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3); 11691 } 11692 11693 if (IS_MF(bp)) { 11694 SET_FLAGS(flags, MODE_MF); 11695 switch (bp->mf_mode) { 11696 case MULTI_FUNCTION_SD: 11697 SET_FLAGS(flags, MODE_MF_SD); 11698 break; 11699 case MULTI_FUNCTION_SI: 11700 SET_FLAGS(flags, MODE_MF_SI); 11701 break; 11702 case MULTI_FUNCTION_AFEX: 11703 SET_FLAGS(flags, MODE_MF_AFEX); 11704 break; 11705 } 11706 } else 11707 SET_FLAGS(flags, MODE_SF); 11708 11709#if defined(__LITTLE_ENDIAN) 11710 SET_FLAGS(flags, MODE_LITTLE_ENDIAN); 11711#else /*(__BIG_ENDIAN)*/ 11712 SET_FLAGS(flags, MODE_BIG_ENDIAN); 11713#endif 11714 INIT_MODE_FLAGS(bp) = flags; 11715} 11716 11717static int bnx2x_init_bp(struct bnx2x *bp) 11718{ 11719 int func; 11720 int rc; 11721 11722 
mutex_init(&bp->port.phy_mutex); 11723 mutex_init(&bp->fw_mb_mutex); 11724 spin_lock_init(&bp->stats_lock); 11725 sema_init(&bp->stats_sema, 1); 11726 11727 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); 11728 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); 11729 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task); 11730 if (IS_PF(bp)) { 11731 rc = bnx2x_get_hwinfo(bp); 11732 if (rc) 11733 return rc; 11734 } else { 11735 eth_zero_addr(bp->dev->dev_addr); 11736 } 11737 11738 bnx2x_set_modes_bitmap(bp); 11739 11740 rc = bnx2x_alloc_mem_bp(bp); 11741 if (rc) 11742 return rc; 11743 11744 bnx2x_read_fwinfo(bp); 11745 11746 func = BP_FUNC(bp); 11747 11748 /* need to reset chip if undi was active */ 11749 if (IS_PF(bp) && !BP_NOMCP(bp)) { 11750 /* init fw_seq */ 11751 bp->fw_seq = 11752 SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & 11753 DRV_MSG_SEQ_NUMBER_MASK; 11754 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); 11755 11756 rc = bnx2x_prev_unload(bp); 11757 if (rc) { 11758 bnx2x_free_mem_bp(bp); 11759 return rc; 11760 } 11761 } 11762 11763 if (CHIP_REV_IS_FPGA(bp)) 11764 dev_err(&bp->pdev->dev, "FPGA detected\n"); 11765 11766 if (BP_NOMCP(bp) && (func == 0)) 11767 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n"); 11768 11769 bp->disable_tpa = disable_tpa; 11770 bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp); 11771 11772 /* Set TPA flags */ 11773 if (bp->disable_tpa) { 11774 bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG); 11775 bp->dev->features &= ~NETIF_F_LRO; 11776 } else { 11777 bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG); 11778 bp->dev->features |= NETIF_F_LRO; 11779 } 11780 11781 if (CHIP_IS_E1(bp)) 11782 bp->dropless_fc = 0; 11783 else 11784 bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp); 11785 11786 bp->mrrs = mrrs; 11787 11788 bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 
0 : MAX_TX_AVAIL; 11789 if (IS_VF(bp)) 11790 bp->rx_ring_size = MAX_RX_AVAIL; 11791 11792 /* make sure that the numbers are in the right granularity */ 11793 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR; 11794 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR; 11795 11796 bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ; 11797 11798 init_timer(&bp->timer); 11799 bp->timer.expires = jiffies + bp->current_interval; 11800 bp->timer.data = (unsigned long) bp; 11801 bp->timer.function = bnx2x_timer; 11802 11803 if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) && 11804 SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) && 11805 SHMEM2_RD(bp, dcbx_lldp_params_offset) && 11806 SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset)) { 11807 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON); 11808 bnx2x_dcbx_init_params(bp); 11809 } else { 11810 bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF); 11811 } 11812 11813 if (CHIP_IS_E1x(bp)) 11814 bp->cnic_base_cl_id = FP_SB_MAX_E1x; 11815 else 11816 bp->cnic_base_cl_id = FP_SB_MAX_E2; 11817 11818 /* multiple tx priority */ 11819 if (IS_VF(bp)) 11820 bp->max_cos = 1; 11821 else if (CHIP_IS_E1x(bp)) 11822 bp->max_cos = BNX2X_MULTI_TX_COS_E1X; 11823 else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) 11824 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0; 11825 else if (CHIP_IS_E3B0(bp)) 11826 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0; 11827 else 11828 BNX2X_ERR("unknown chip %x revision %x\n", 11829 CHIP_NUM(bp), CHIP_REV(bp)); 11830 BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos); 11831 11832 /* We need at least one default status block for slow-path events, 11833 * second status block for the L2 queue, and a third status block for 11834 * CNIC if supported. 
11835 */ 11836 if (IS_VF(bp)) 11837 bp->min_msix_vec_cnt = 1; 11838 else if (CNIC_SUPPORT(bp)) 11839 bp->min_msix_vec_cnt = 3; 11840 else /* PF w/o cnic */ 11841 bp->min_msix_vec_cnt = 2; 11842 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt); 11843 11844 bp->dump_preset_idx = 1; 11845 11846 return rc; 11847} 11848 11849/**************************************************************************** 11850* General service functions 11851****************************************************************************/ 11852 11853/* 11854 * net_device service functions 11855 */ 11856 11857/* called with rtnl_lock */ 11858static int bnx2x_open(struct net_device *dev) 11859{ 11860 struct bnx2x *bp = netdev_priv(dev); 11861 int rc; 11862 11863 bp->stats_init = true; 11864 11865 netif_carrier_off(dev); 11866 11867 bnx2x_set_power_state(bp, PCI_D0); 11868 11869 /* If parity had happen during the unload, then attentions 11870 * and/or RECOVERY_IN_PROGRES may still be set. In this case we 11871 * want the first function loaded on the current engine to 11872 * complete the recovery. 11873 * Parity recovery is only relevant for PF driver. 11874 */ 11875 if (IS_PF(bp)) { 11876 int other_engine = BP_PATH(bp) ? 0 : 1; 11877 bool other_load_status, load_status; 11878 bool global = false; 11879 11880 other_load_status = bnx2x_get_load_status(bp, other_engine); 11881 load_status = bnx2x_get_load_status(bp, BP_PATH(bp)); 11882 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) || 11883 bnx2x_chk_parity_attn(bp, &global, true)) { 11884 do { 11885 /* If there are attentions and they are in a 11886 * global blocks, set the GLOBAL_RESET bit 11887 * regardless whether it will be this function 11888 * that will complete the recovery or not. 11889 */ 11890 if (global) 11891 bnx2x_set_reset_global(bp); 11892 11893 /* Only the first function on the current 11894 * engine should try to recover in open. 
In case 11895 * of attentions in global blocks only the first 11896 * in the chip should try to recover. 11897 */ 11898 if ((!load_status && 11899 (!global || !other_load_status)) && 11900 bnx2x_trylock_leader_lock(bp) && 11901 !bnx2x_leader_reset(bp)) { 11902 netdev_info(bp->dev, 11903 "Recovered in open\n"); 11904 break; 11905 } 11906 11907 /* recovery has failed... */ 11908 bnx2x_set_power_state(bp, PCI_D3hot); 11909 bp->recovery_state = BNX2X_RECOVERY_FAILED; 11910 11911 BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n" 11912 "If you still see this message after a few retries then power cycle is required.\n"); 11913 11914 return -EAGAIN; 11915 } while (0); 11916 } 11917 } 11918 11919 bp->recovery_state = BNX2X_RECOVERY_DONE; 11920 rc = bnx2x_nic_load(bp, LOAD_OPEN); 11921 if (rc) 11922 return rc; 11923 return 0; 11924} 11925 11926/* called with rtnl_lock */ 11927static int bnx2x_close(struct net_device *dev) 11928{ 11929 struct bnx2x *bp = netdev_priv(dev); 11930 11931 /* Unload the driver, release IRQs */ 11932 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); 11933 11934 return 0; 11935} 11936 11937static int bnx2x_init_mcast_macs_list(struct bnx2x *bp, 11938 struct bnx2x_mcast_ramrod_params *p) 11939{ 11940 int mc_count = netdev_mc_count(bp->dev); 11941 struct bnx2x_mcast_list_elem *mc_mac = 11942 kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC); 11943 struct netdev_hw_addr *ha; 11944 11945 if (!mc_mac) 11946 return -ENOMEM; 11947 11948 INIT_LIST_HEAD(&p->mcast_list); 11949 11950 netdev_for_each_mc_addr(ha, bp->dev) { 11951 mc_mac->mac = bnx2x_mc_addr(ha); 11952 list_add_tail(&mc_mac->link, &p->mcast_list); 11953 mc_mac++; 11954 } 11955 11956 p->mcast_list_len = mc_count; 11957 11958 return 0; 11959} 11960 11961static void bnx2x_free_mcast_macs_list( 11962 struct bnx2x_mcast_ramrod_params *p) 11963{ 11964 struct bnx2x_mcast_list_elem *mc_mac = 11965 list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem, 11966 link); 11967 11968 
	WARN_ON(!mc_mac);
	/* all elements live in one allocation whose base is the first entry,
	 * so a single kfree() releases the whole list
	 */
	kfree(mc_mac);
}

/**
 * bnx2x_set_uc_list - configure a new unicast MACs list.
 *
 * @bp: driver handle
 *
 * We will use zero (0) as a MAC type for these MACs.
 */
static int bnx2x_set_uc_list(struct bnx2x *bp)
{
	int rc;
	struct net_device *dev = bp->dev;
	struct netdev_hw_addr *ha;
	struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
	unsigned long ramrod_flags = 0;

	/* First schedule a cleanup up of old configuration */
	rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
	if (rc < 0) {
		BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
		return rc;
	}

	netdev_for_each_uc_addr(ha, dev) {
		rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
				       BNX2X_UC_LIST_MAC, &ramrod_flags);
		if (rc == -EEXIST) {
			DP(BNX2X_MSG_SP,
			   "Failed to schedule ADD operations: %d\n", rc);
			/* do not treat adding same MAC as error */
			rc = 0;

		} else if (rc < 0) {

			BNX2X_ERR("Failed to schedule ADD operations: %d\n",
				  rc);
			return rc;
		}
	}

	/* Execute the pending commands */
	__set_bit(RAMROD_CONT, &ramrod_flags);
	return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
				 BNX2X_UC_LIST_MAC, &ramrod_flags);
}

/* bnx2x_set_mc_list - replace the configured multicast MACs with the
 * netdev's current list: delete all, then add the new set via ramrods.
 */
static int bnx2x_set_mc_list(struct bnx2x *bp)
{
	struct net_device *dev = bp->dev;
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	int rc = 0;

	rparam.mcast_obj = &bp->mcast_obj;

	/* first, clear all configured multicast MACs */
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
	if (rc < 0) {
		BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc);
		return rc;
	}

	/* then, configure a new MACs list */
	if (netdev_mc_count(dev)) {
		rc = bnx2x_init_mcast_macs_list(bp, &rparam);
		if (rc) {
			BNX2X_ERR("Failed to create multicast MACs list: %d\n",
				  rc);
			return rc;
		}

		/* Now add the new MACs */
		rc = bnx2x_config_mcast(bp, &rparam,
					BNX2X_MCAST_CMD_ADD);
		if (rc < 0)
			BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
				  rc);

		bnx2x_free_mcast_macs_list(&rparam);
	}

	return rc;
}

/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	} else {
		/* Schedule an SP task to handle rest of change */
		DP(NETIF_MSG_IFUP, "Scheduling an Rx mode change\n");
		smp_mb__before_clear_bit();
		set_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state);
		smp_mb__after_clear_bit();
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
	}
}

/* bnx2x_set_rx_mode_inner - apply the rx-mode change scheduled by
 * bnx2x_set_rx_mode(). Takes/drops netif_addr_lock_bh itself; the lock
 * is released around calls that may sleep (see comments below).
 */
void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
{
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);

	netif_addr_lock_bh(bp->dev);

	if (bp->dev->flags & IFF_PROMISC) {
		rx_mode = BNX2X_RX_MODE_PROMISC;
	} else if ((bp->dev->flags & IFF_ALLMULTI) ||
		   ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
		    CHIP_IS_E1(bp))) {
		rx_mode = BNX2X_RX_MODE_ALLMULTI;
	} else {
		if (IS_PF(bp)) {
			/* some multicasts */
			if (bnx2x_set_mc_list(bp) < 0)
				rx_mode = BNX2X_RX_MODE_ALLMULTI;

			/* release bh lock, as bnx2x_set_uc_list might sleep */
			netif_addr_unlock_bh(bp->dev);
			if (bnx2x_set_uc_list(bp) < 0)
				rx_mode = BNX2X_RX_MODE_PROMISC;
			netif_addr_lock_bh(bp->dev);
		} else {
			/* configuring mcast to a vf involves sleeping (when we
			 * wait for the pf's response).
			 */
			smp_mb__before_clear_bit();
			set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
				&bp->sp_rtnl_state);
			smp_mb__after_clear_bit();
			schedule_delayed_work(&bp->sp_rtnl_task, 0);
		}
	}

	bp->rx_mode = rx_mode;
	/* handle ISCSI SD mode */
	if (IS_MF_ISCSI_SD(bp))
		bp->rx_mode = BNX2X_RX_MODE_NONE;

	/* Schedule the rx_mode command */
	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
		set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
		netif_addr_unlock_bh(bp->dev);
		return;
	}

	if (IS_PF(bp)) {
		bnx2x_set_storm_rx_mode(bp);
		netif_addr_unlock_bh(bp->dev);
	} else {
		/* VF will need to request the PF to make this change, and so
		 * the VF needs to release the bottom-half lock prior to the
		 * request (as it will likely require sleep on the VF side)
		 */
		netif_addr_unlock_bh(bp->dev);
		bnx2x_vfpf_storm_rx_mode(bp);
	}
}

/* called with rtnl_lock */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	/* on success return the register value instead of the zero rc */
	if (!rc)
		rc = value;
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	int rc;

	DP(NETIF_MSG_LINK,
	   "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n",
	   prtad, devad, addr, value);

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
	bnx2x_release_phy_lock(bp);
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: kick NAPI on every ethernet queue */
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		napi_schedule(&bnx2x_fp(bp, fp->index, napi));
	}
}
#endif

/* .ndo_validate_addr: for a VF, refresh the MAC from the PF's bulletin
 * board before validating.
 */
static int bnx2x_validate_addr(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* query the bulletin board for mac address configured by the PF */
	if (IS_VF(bp))
		bnx2x_sample_bulletin(bp);

	if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) {
		BNX2X_ERR("Non-valid Ethernet address\n");
		return -EADDRNOTAVAIL;
	}
	return 0;
}

/* report the unique physical-port id (shared by all functions on a port) */
static int bnx2x_get_phys_port_id(struct net_device *netdev,
				  struct netdev_phys_port_id *ppid)
{
	struct bnx2x *bp = netdev_priv(netdev);

	if (!(bp->flags & HAS_PHYS_PORT_ID))
		return -EOPNOTSUPP;

	ppid->id_len = sizeof(bp->phys_port_id);
	memcpy(ppid->id, bp->phys_port_id, ppid->id_len);

	return 0;
}

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_select_queue	= bnx2x_select_queue,
	.ndo_set_rx_mode	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= bnx2x_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_fix_features	= bnx2x_fix_features,
	.ndo_set_features	= bnx2x_set_features,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
	.ndo_setup_tc		= bnx2x_setup_tc,
#ifdef CONFIG_BNX2X_SRIOV
	.ndo_set_vf_mac		= bnx2x_set_vf_mac,
	.ndo_set_vf_vlan	= bnx2x_set_vf_vlan,
	.ndo_get_vf_config	= bnx2x_get_vf_config,
#endif
#ifdef NETDEV_FCOE_WWNN
	.ndo_fcoe_get_wwn	= bnx2x_fcoe_get_wwn,
#endif

#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= bnx2x_low_latency_recv,
#endif
	.ndo_get_phys_port_id	= bnx2x_get_phys_port_id,
};

/* try 64-bit DMA first, fall back to 32-bit; fail probe if neither works */
static int bnx2x_set_coherency_mask(struct bnx2x *bp)
{
	struct device *dev = &bp->pdev->dev;

	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(dev, "System does not support DMA, aborting\n");
		return -EIO;
	}

	return 0;
}

/* undo pci_enable_pcie_error_reporting() if it was enabled at init */
static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp)
{
	if (bp->flags & AER_ENABLED) {
		pci_disable_pcie_error_reporting(bp->pdev);
		bp->flags &= ~AER_ENABLED;
	}
}

/* bnx2x_init_dev - PCI-level device initialization during probe.
 *
 * Enables the device, claims BARs, maps the register window, determines
 * the PF number and programs netdev features/ops. On failure, resources
 * are unwound through the err_out_* labels in reverse acquisition order.
 */
static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
			  struct net_device *dev, unsigned long board_type)
{
	int rc;
	u32 pci_cfg_dword;
	bool chip_is_e1x = (board_type == BCM57710 ||
			    board_type == BCM57711 ||
			    board_type == BCM57711E);

	SET_NETDEV_DEV(dev, &pdev->dev);

	bp->dev = dev;
	bp->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword);
	if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) ==
	    PCICFG_REVESION_ID_ERROR_VAL) {
		pr_err("PCI device error, probably due to fan failure, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* only the first enabler of the device claims the regions */
	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	if (IS_PF(bp)) {
		if (!pdev->pm_cap) {
			dev_err(&bp->pdev->dev,
				"Cannot find power management capability, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}
	}

	if (!pci_is_pcie(pdev)) {
		dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	rc = bnx2x_set_coherency_mask(bp);
	if (rc)
		goto err_out_release;

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* In E1/E1H use pci device function given by kernel.
	 * In E2/E3 read physical function from ME register since these chips
	 * support Physical Device Assignment where kernel BDF maybe arbitrary
	 * (depending on hypervisor).
	 */
	if (chip_is_e1x) {
		bp->pf_num = PCI_FUNC(pdev->devfn);
	} else {
		/* chip is E2/3*/
		pci_read_config_dword(bp->pdev,
				      PCICFG_ME_REGISTER, &pci_cfg_dword);
		bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
				  ME_REG_ABS_PF_NUM_SHIFT);
	}
	BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	/* AER (Advanced Error reporting) configuration */
	rc = pci_enable_pcie_error_reporting(pdev);
	if (!rc)
		bp->flags |= AER_ENABLED;
	else
		BNX2X_DEV_INFO("Failed To configure PCIe AER [%d]\n", rc);

	/*
	 * Clean the following indirect addresses for all functions since it
	 * is not used by the driver.
	 */
	if (IS_PF(bp)) {
		REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
		REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
		REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
		REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);

		if (chip_is_e1x) {
			REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
			REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
			REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
			REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
		}

		/* Enable internal target-read (in case we are probed after PF
		 * FLR). Must be done prior to any BAR read access. Only for
		 * 57712 and up
		 */
		if (!chip_is_e1x)
			REG_WR(bp,
			       PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
	}

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(bp, dev);

	dev->priv_flags |= IFF_UNICAST_FLT;

	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
		NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
		NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
	/* tunnel offloads exist on E2 and newer chips only */
	if (!CHIP_IS_E1x(bp)) {
		dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
				    NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
		dev->hw_enc_features =
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
			NETIF_F_GSO_IPIP |
			NETIF_F_GSO_SIT |
			NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
	}

	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;

	dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
	dev->features |= NETIF_F_HIGHDMA;

	/* Add Loopback capability to the device */
	dev->hw_features |= NETIF_F_LOOPBACK;

#ifdef BCM_DCBNL
	dev->dcbnl_ops = &bnx2x_dcbnl_ops;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);

err_out:
	return rc;
}

/* bnx2x_check_firmware - validate the loaded firmware image.
 *
 * Verifies that every section header, init_ops offset and the embedded
 * version match what this driver was built against, so a corrupt or
 * mismatched blob is rejected before any of it is parsed.
 */
static int bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	__be16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) {
		BNX2X_ERR("Wrong FW size\n");
		return -EINVAL;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			BNX2X_ERR("Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (__force __be16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			BNX2X_ERR("Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW
	   version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
			  fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
			  BCM_5710_FW_MAJOR_VERSION,
			  BCM_5710_FW_MINOR_VERSION,
			  BCM_5710_FW_REVISION_VERSION,
			  BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

/* convert an array of big-endian 32-bit words to host order in-place
 * semantics (separate source/target buffers); n is a byte count
 */
static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	/* each raw_op is packed into two big-endian 32-bit words (n/8 ops) */
	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}

/* IRO array is stored in the following format:
 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
 */
static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct iro *target = (struct iro *)_target;
	u32 i, j, tmp;

	/* each iro entry is packed into three big-endian 32-bit words */
	for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
		target[i].base = be32_to_cpu(source[j]);
		j++;
		tmp = be32_to_cpu(source[j]);
		target[i].m1 = (tmp >> 16) & 0xffff;
		target[i].m2 = tmp & 0xffff;
		j++;
		tmp = be32_to_cpu(source[j]);
		target[i].m3 = (tmp >> 16) & 0xffff;
		target[i].size = tmp & 0xffff;
		j++;
	}
}

/* convert an array of big-endian 16-bit words to host order; n is bytes */
static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

/* allocate bp->arr and fill it from the firmware section described by
 * fw_hdr->arr using the given conversion function; jumps to lbl on OOM
 */
#define BNX2X_ALLOC_AND_SET(arr, lbl, func)				\
do {									\
	u32 len = be32_to_cpu(fw_hdr->arr.len);				\
	bp->arr = kmalloc(len, GFP_KERNEL);				\
	if (!bp->arr)							\
		goto lbl;						\
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),	\
	     (u8 *)bp->arr, len);					\
} while (0)

/* bnx2x_init_firmware - load and parse the chip firmware file.
 *
 * Requests the per-chip-family firmware blob, validates it and builds the
 * host-order init arrays (data blob, opcodes, offsets, IRO). Idempotent:
 * returns immediately if firmware was already loaded.
 */
static int bnx2x_init_firmware(struct bnx2x *bp)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (bp->firmware)
		return 0;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else if (!CHIP_IS_E1x(bp))
		fw_file_name = FW_FILE_NAME_E2;
	else {
		BNX2X_ERR("Unsupported chip revision\n");
		return -EINVAL;
	}
	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
	if (rc) {
		BNX2X_ERR("Can't load firmware file %s\n",
			  fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

12655 /* Opcodes */ 12656 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops); 12657 12658 /* Offsets */ 12659 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, 12660 be16_to_cpu_n); 12661 12662 /* STORMs firmware */ 12663 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data + 12664 be32_to_cpu(fw_hdr->tsem_int_table_data.offset); 12665 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data + 12666 be32_to_cpu(fw_hdr->tsem_pram_data.offset); 12667 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data + 12668 be32_to_cpu(fw_hdr->usem_int_table_data.offset); 12669 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data + 12670 be32_to_cpu(fw_hdr->usem_pram_data.offset); 12671 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data + 12672 be32_to_cpu(fw_hdr->xsem_int_table_data.offset); 12673 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data + 12674 be32_to_cpu(fw_hdr->xsem_pram_data.offset); 12675 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data + 12676 be32_to_cpu(fw_hdr->csem_int_table_data.offset); 12677 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data + 12678 be32_to_cpu(fw_hdr->csem_pram_data.offset); 12679 /* IRO */ 12680 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro); 12681 12682 return 0; 12683 12684iro_alloc_err: 12685 kfree(bp->init_ops_offsets); 12686init_offsets_alloc_err: 12687 kfree(bp->init_ops); 12688init_ops_alloc_err: 12689 kfree(bp->init_data); 12690request_firmware_exit: 12691 release_firmware(bp->firmware); 12692 bp->firmware = NULL; 12693 12694 return rc; 12695} 12696 12697static void bnx2x_release_firmware(struct bnx2x *bp) 12698{ 12699 kfree(bp->init_ops_offsets); 12700 kfree(bp->init_ops); 12701 kfree(bp->init_data); 12702 release_firmware(bp->firmware); 12703 bp->firmware = NULL; 12704} 12705 12706static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = { 12707 .init_hw_cmn_chip = bnx2x_init_hw_common_chip, 12708 .init_hw_cmn = bnx2x_init_hw_common, 12709 .init_hw_port = bnx2x_init_hw_port, 12710 .init_hw_func = bnx2x_init_hw_func, 12711 12712 
.reset_hw_cmn = bnx2x_reset_common, 12713 .reset_hw_port = bnx2x_reset_port, 12714 .reset_hw_func = bnx2x_reset_func, 12715 12716 .gunzip_init = bnx2x_gunzip_init, 12717 .gunzip_end = bnx2x_gunzip_end, 12718 12719 .init_fw = bnx2x_init_firmware, 12720 .release_fw = bnx2x_release_firmware, 12721}; 12722 12723void bnx2x__init_func_obj(struct bnx2x *bp) 12724{ 12725 /* Prepare DMAE related driver resources */ 12726 bnx2x_setup_dmae(bp); 12727 12728 bnx2x_init_func_obj(bp, &bp->func_obj, 12729 bnx2x_sp(bp, func_rdata), 12730 bnx2x_sp_mapping(bp, func_rdata), 12731 bnx2x_sp(bp, func_afex_rdata), 12732 bnx2x_sp_mapping(bp, func_afex_rdata), 12733 &bnx2x_func_sp_drv); 12734} 12735 12736/* must be called after sriov-enable */ 12737static int bnx2x_set_qm_cid_count(struct bnx2x *bp) 12738{ 12739 int cid_count = BNX2X_L2_MAX_CID(bp); 12740 12741 if (IS_SRIOV(bp)) 12742 cid_count += BNX2X_VF_CIDS; 12743 12744 if (CNIC_SUPPORT(bp)) 12745 cid_count += CNIC_CID_MAX; 12746 12747 return roundup(cid_count, QM_CID_ROUND); 12748} 12749 12750/** 12751 * bnx2x_get_num_none_def_sbs - return the number of none default SBs 12752 * 12753 * @dev: pci device 12754 * 12755 */ 12756static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt) 12757{ 12758 int index; 12759 u16 control = 0; 12760 12761 /* 12762 * If MSI-X is not supported - return number of SBs needed to support 12763 * one fast path queue: one FP queue + SB for CNIC 12764 */ 12765 if (!pdev->msix_cap) { 12766 dev_info(&pdev->dev, "no msix capability found\n"); 12767 return 1 + cnic_cnt; 12768 } 12769 dev_info(&pdev->dev, "msix capability found\n"); 12770 12771 /* 12772 * The value in the PCI configuration space is the index of the last 12773 * entry, namely one less than the actual size of the table, which is 12774 * exactly what we want to return from this function: number of all SBs 12775 * without the default SB. 12776 * For VFs there is no default SB, then we return (index+1). 
12777 */ 12778 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control); 12779 12780 index = control & PCI_MSIX_FLAGS_QSIZE; 12781 12782 return index; 12783} 12784 12785static int set_max_cos_est(int chip_id) 12786{ 12787 switch (chip_id) { 12788 case BCM57710: 12789 case BCM57711: 12790 case BCM57711E: 12791 return BNX2X_MULTI_TX_COS_E1X; 12792 case BCM57712: 12793 case BCM57712_MF: 12794 return BNX2X_MULTI_TX_COS_E2_E3A0; 12795 case BCM57800: 12796 case BCM57800_MF: 12797 case BCM57810: 12798 case BCM57810_MF: 12799 case BCM57840_4_10: 12800 case BCM57840_2_20: 12801 case BCM57840_O: 12802 case BCM57840_MFO: 12803 case BCM57840_MF: 12804 case BCM57811: 12805 case BCM57811_MF: 12806 return BNX2X_MULTI_TX_COS_E3B0; 12807 case BCM57712_VF: 12808 case BCM57800_VF: 12809 case BCM57810_VF: 12810 case BCM57840_VF: 12811 case BCM57811_VF: 12812 return 1; 12813 default: 12814 pr_err("Unknown board_type (%d), aborting\n", chip_id); 12815 return -ENODEV; 12816 } 12817} 12818 12819static int set_is_vf(int chip_id) 12820{ 12821 switch (chip_id) { 12822 case BCM57712_VF: 12823 case BCM57800_VF: 12824 case BCM57810_VF: 12825 case BCM57840_VF: 12826 case BCM57811_VF: 12827 return true; 12828 default: 12829 return false; 12830 } 12831} 12832 12833static int bnx2x_init_one(struct pci_dev *pdev, 12834 const struct pci_device_id *ent) 12835{ 12836 struct net_device *dev = NULL; 12837 struct bnx2x *bp; 12838 enum pcie_link_width pcie_width; 12839 enum pci_bus_speed pcie_speed; 12840 int rc, max_non_def_sbs; 12841 int rx_count, tx_count, rss_count, doorbell_size; 12842 int max_cos_est; 12843 bool is_vf; 12844 int cnic_cnt; 12845 12846 /* An estimated maximum supported CoS number according to the chip 12847 * version. 12848 * We will try to roughly estimate the maximum number of CoSes this chip 12849 * may support in order to minimize the memory allocated for Tx 12850 * netdev_queue's. 
This number will be accurately calculated during the 12851 * initialization of bp->max_cos based on the chip versions AND chip 12852 * revision in the bnx2x_init_bp(). 12853 */ 12854 max_cos_est = set_max_cos_est(ent->driver_data); 12855 if (max_cos_est < 0) 12856 return max_cos_est; 12857 is_vf = set_is_vf(ent->driver_data); 12858 cnic_cnt = is_vf ? 0 : 1; 12859 12860 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt); 12861 12862 /* add another SB for VF as it has no default SB */ 12863 max_non_def_sbs += is_vf ? 1 : 0; 12864 12865 /* Maximum number of RSS queues: one IGU SB goes to CNIC */ 12866 rss_count = max_non_def_sbs - cnic_cnt; 12867 12868 if (rss_count < 1) 12869 return -EINVAL; 12870 12871 /* Maximum number of netdev Rx queues: RSS + FCoE L2 */ 12872 rx_count = rss_count + cnic_cnt; 12873 12874 /* Maximum number of netdev Tx queues: 12875 * Maximum TSS queues * Maximum supported number of CoS + FCoE L2 12876 */ 12877 tx_count = rss_count * max_cos_est + cnic_cnt; 12878 12879 /* dev zeroed in init_etherdev */ 12880 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count); 12881 if (!dev) 12882 return -ENOMEM; 12883 12884 bp = netdev_priv(dev); 12885 12886 bp->flags = 0; 12887 if (is_vf) 12888 bp->flags |= IS_VF_FLAG; 12889 12890 bp->igu_sb_cnt = max_non_def_sbs; 12891 bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM; 12892 bp->msg_enable = debug; 12893 bp->cnic_support = cnic_cnt; 12894 bp->cnic_probe = bnx2x_cnic_probe; 12895 12896 pci_set_drvdata(pdev, dev); 12897 12898 rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data); 12899 if (rc < 0) { 12900 free_netdev(dev); 12901 return rc; 12902 } 12903 12904 BNX2X_DEV_INFO("This is a %s function\n", 12905 IS_PF(bp) ? "physical" : "virtual"); 12906 BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? 
"on" : "off"); 12907 BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs); 12908 BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n", 12909 tx_count, rx_count); 12910 12911 rc = bnx2x_init_bp(bp); 12912 if (rc) 12913 goto init_one_exit; 12914 12915 /* Map doorbells here as we need the real value of bp->max_cos which 12916 * is initialized in bnx2x_init_bp() to determine the number of 12917 * l2 connections. 12918 */ 12919 if (IS_VF(bp)) { 12920 bp->doorbells = bnx2x_vf_doorbells(bp); 12921 rc = bnx2x_vf_pci_alloc(bp); 12922 if (rc) 12923 goto init_one_exit; 12924 } else { 12925 doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT); 12926 if (doorbell_size > pci_resource_len(pdev, 2)) { 12927 dev_err(&bp->pdev->dev, 12928 "Cannot map doorbells, bar size too small, aborting\n"); 12929 rc = -ENOMEM; 12930 goto init_one_exit; 12931 } 12932 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), 12933 doorbell_size); 12934 } 12935 if (!bp->doorbells) { 12936 dev_err(&bp->pdev->dev, 12937 "Cannot map doorbell space, aborting\n"); 12938 rc = -ENOMEM; 12939 goto init_one_exit; 12940 } 12941 12942 if (IS_VF(bp)) { 12943 rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count); 12944 if (rc) 12945 goto init_one_exit; 12946 } 12947 12948 /* Enable SRIOV if capability found in configuration space */ 12949 rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS); 12950 if (rc) 12951 goto init_one_exit; 12952 12953 /* calc qm_cid_count */ 12954 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp); 12955 BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count); 12956 12957 /* disable FCOE L2 queue for E1x*/ 12958 if (CHIP_IS_E1x(bp)) 12959 bp->flags |= NO_FCOE_FLAG; 12960 12961 /* Set bp->num_queues for MSI-X mode*/ 12962 bnx2x_set_num_queues(bp); 12963 12964 /* Configure interrupt mode: try to enable MSI-X/MSI if 12965 * needed. 
12966 */ 12967 rc = bnx2x_set_int_mode(bp); 12968 if (rc) { 12969 dev_err(&pdev->dev, "Cannot set interrupts\n"); 12970 goto init_one_exit; 12971 } 12972 BNX2X_DEV_INFO("set interrupts successfully\n"); 12973 12974 /* register the net device */ 12975 rc = register_netdev(dev); 12976 if (rc) { 12977 dev_err(&pdev->dev, "Cannot register net device\n"); 12978 goto init_one_exit; 12979 } 12980 BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name); 12981 12982 if (!NO_FCOE(bp)) { 12983 /* Add storage MAC address */ 12984 rtnl_lock(); 12985 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); 12986 rtnl_unlock(); 12987 } 12988 if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) || 12989 pcie_speed == PCI_SPEED_UNKNOWN || 12990 pcie_width == PCIE_LNK_WIDTH_UNKNOWN) 12991 BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n"); 12992 else 12993 BNX2X_DEV_INFO( 12994 "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n", 12995 board_info[ent->driver_data].name, 12996 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), 12997 pcie_width, 12998 pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" : 12999 pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" : 13000 pcie_speed == PCIE_SPEED_8_0GT ? 
"8.0GHz" : 13001 "Unknown", 13002 dev->base_addr, bp->pdev->irq, dev->dev_addr); 13003 13004 return 0; 13005 13006init_one_exit: 13007 bnx2x_disable_pcie_error_reporting(bp); 13008 13009 if (bp->regview) 13010 iounmap(bp->regview); 13011 13012 if (IS_PF(bp) && bp->doorbells) 13013 iounmap(bp->doorbells); 13014 13015 free_netdev(dev); 13016 13017 if (atomic_read(&pdev->enable_cnt) == 1) 13018 pci_release_regions(pdev); 13019 13020 pci_disable_device(pdev); 13021 13022 return rc; 13023} 13024 13025static void __bnx2x_remove(struct pci_dev *pdev, 13026 struct net_device *dev, 13027 struct bnx2x *bp, 13028 bool remove_netdev) 13029{ 13030 /* Delete storage MAC address */ 13031 if (!NO_FCOE(bp)) { 13032 rtnl_lock(); 13033 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); 13034 rtnl_unlock(); 13035 } 13036 13037#ifdef BCM_DCBNL 13038 /* Delete app tlvs from dcbnl */ 13039 bnx2x_dcbnl_update_applist(bp, true); 13040#endif 13041 13042 if (IS_PF(bp) && 13043 !BP_NOMCP(bp) && 13044 (bp->flags & BC_SUPPORTS_RMMOD_CMD)) 13045 bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0); 13046 13047 /* Close the interface - either directly or implicitly */ 13048 if (remove_netdev) { 13049 unregister_netdev(dev); 13050 } else { 13051 rtnl_lock(); 13052 dev_close(dev); 13053 rtnl_unlock(); 13054 } 13055 13056 bnx2x_iov_remove_one(bp); 13057 13058 /* Power on: we can't let PCI layer write to us while we are in D3 */ 13059 if (IS_PF(bp)) 13060 bnx2x_set_power_state(bp, PCI_D0); 13061 13062 /* Disable MSI/MSI-X */ 13063 bnx2x_disable_msi(bp); 13064 13065 /* Power off */ 13066 if (IS_PF(bp)) 13067 bnx2x_set_power_state(bp, PCI_D3hot); 13068 13069 /* Make sure RESET task is not scheduled before continuing */ 13070 cancel_delayed_work_sync(&bp->sp_rtnl_task); 13071 13072 /* send message via vfpf channel to release the resources of this vf */ 13073 if (IS_VF(bp)) 13074 bnx2x_vfpf_release(bp); 13075 13076 /* Assumes no further PCIe PM changes will occur */ 13077 if (system_state == 
SYSTEM_POWER_OFF) { 13078 pci_wake_from_d3(pdev, bp->wol); 13079 pci_set_power_state(pdev, PCI_D3hot); 13080 } 13081 13082 bnx2x_disable_pcie_error_reporting(bp); 13083 13084 if (bp->regview) 13085 iounmap(bp->regview); 13086 13087 /* for vf doorbells are part of the regview and were unmapped along with 13088 * it. FW is only loaded by PF. 13089 */ 13090 if (IS_PF(bp)) { 13091 if (bp->doorbells) 13092 iounmap(bp->doorbells); 13093 13094 bnx2x_release_firmware(bp); 13095 } 13096 bnx2x_free_mem_bp(bp); 13097 13098 if (remove_netdev) 13099 free_netdev(dev); 13100 13101 if (atomic_read(&pdev->enable_cnt) == 1) 13102 pci_release_regions(pdev); 13103 13104 pci_disable_device(pdev); 13105} 13106 13107static void bnx2x_remove_one(struct pci_dev *pdev) 13108{ 13109 struct net_device *dev = pci_get_drvdata(pdev); 13110 struct bnx2x *bp; 13111 13112 if (!dev) { 13113 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); 13114 return; 13115 } 13116 bp = netdev_priv(dev); 13117 13118 __bnx2x_remove(pdev, dev, bp, true); 13119} 13120 13121static int bnx2x_eeh_nic_unload(struct bnx2x *bp) 13122{ 13123 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; 13124 13125 bp->rx_mode = BNX2X_RX_MODE_NONE; 13126 13127 if (CNIC_LOADED(bp)) 13128 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); 13129 13130 /* Stop Tx */ 13131 bnx2x_tx_disable(bp); 13132 /* Delete all NAPI objects */ 13133 bnx2x_del_all_napi(bp); 13134 if (CNIC_LOADED(bp)) 13135 bnx2x_del_all_napi_cnic(bp); 13136 netdev_reset_tc(bp->dev); 13137 13138 del_timer_sync(&bp->timer); 13139 cancel_delayed_work(&bp->sp_task); 13140 cancel_delayed_work(&bp->period_task); 13141 13142 spin_lock_bh(&bp->stats_lock); 13143 bp->stats_state = STATS_STATE_DISABLED; 13144 spin_unlock_bh(&bp->stats_lock); 13145 13146 bnx2x_save_statistics(bp); 13147 13148 netif_carrier_off(bp->dev); 13149 13150 return 0; 13151} 13152 13153/** 13154 * bnx2x_io_error_detected - called when PCI error is detected 13155 * @pdev: Pointer to PCI device 13156 * @state: The 
current pci connection state 13157 * 13158 * This function is called after a PCI bus error affecting 13159 * this device has been detected. 13160 */ 13161static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev, 13162 pci_channel_state_t state) 13163{ 13164 struct net_device *dev = pci_get_drvdata(pdev); 13165 struct bnx2x *bp = netdev_priv(dev); 13166 13167 rtnl_lock(); 13168 13169 BNX2X_ERR("IO error detected\n"); 13170 13171 netif_device_detach(dev); 13172 13173 if (state == pci_channel_io_perm_failure) { 13174 rtnl_unlock(); 13175 return PCI_ERS_RESULT_DISCONNECT; 13176 } 13177 13178 if (netif_running(dev)) 13179 bnx2x_eeh_nic_unload(bp); 13180 13181 bnx2x_prev_path_mark_eeh(bp); 13182 13183 pci_disable_device(pdev); 13184 13185 rtnl_unlock(); 13186 13187 /* Request a slot reset */ 13188 return PCI_ERS_RESULT_NEED_RESET; 13189} 13190 13191/** 13192 * bnx2x_io_slot_reset - called after the PCI bus has been reset 13193 * @pdev: Pointer to PCI device 13194 * 13195 * Restart the card from scratch, as if from a cold-boot. 
13196 */ 13197static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev) 13198{ 13199 struct net_device *dev = pci_get_drvdata(pdev); 13200 struct bnx2x *bp = netdev_priv(dev); 13201 int i; 13202 13203 rtnl_lock(); 13204 BNX2X_ERR("IO slot reset initializing...\n"); 13205 if (pci_enable_device(pdev)) { 13206 dev_err(&pdev->dev, 13207 "Cannot re-enable PCI device after reset\n"); 13208 rtnl_unlock(); 13209 return PCI_ERS_RESULT_DISCONNECT; 13210 } 13211 13212 pci_set_master(pdev); 13213 pci_restore_state(pdev); 13214 pci_save_state(pdev); 13215 13216 if (netif_running(dev)) 13217 bnx2x_set_power_state(bp, PCI_D0); 13218 13219 if (netif_running(dev)) { 13220 BNX2X_ERR("IO slot reset --> driver unload\n"); 13221 13222 /* MCP should have been reset; Need to wait for validity */ 13223 bnx2x_init_shmem(bp); 13224 13225 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { 13226 u32 v; 13227 13228 v = SHMEM2_RD(bp, 13229 drv_capabilities_flag[BP_FW_MB_IDX(bp)]); 13230 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)], 13231 v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2); 13232 } 13233 bnx2x_drain_tx_queues(bp); 13234 bnx2x_send_unload_req(bp, UNLOAD_RECOVERY); 13235 bnx2x_netif_stop(bp, 1); 13236 bnx2x_free_irq(bp); 13237 13238 /* Report UNLOAD_DONE to MCP */ 13239 bnx2x_send_unload_done(bp, true); 13240 13241 bp->sp_state = 0; 13242 bp->port.pmf = 0; 13243 13244 bnx2x_prev_unload(bp); 13245 13246 /* We should have reseted the engine, so It's fair to 13247 * assume the FW will no longer write to the bnx2x driver. 
13248 */ 13249 bnx2x_squeeze_objects(bp); 13250 bnx2x_free_skbs(bp); 13251 for_each_rx_queue(bp, i) 13252 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 13253 bnx2x_free_fp_mem(bp); 13254 bnx2x_free_mem(bp); 13255 13256 bp->state = BNX2X_STATE_CLOSED; 13257 } 13258 13259 rtnl_unlock(); 13260 13261 /* If AER, perform cleanup of the PCIe registers */ 13262 if (bp->flags & AER_ENABLED) { 13263 if (pci_cleanup_aer_uncorrect_error_status(pdev)) 13264 BNX2X_ERR("pci_cleanup_aer_uncorrect_error_status failed\n"); 13265 else 13266 DP(NETIF_MSG_HW, "pci_cleanup_aer_uncorrect_error_status succeeded\n"); 13267 } 13268 13269 return PCI_ERS_RESULT_RECOVERED; 13270} 13271 13272/** 13273 * bnx2x_io_resume - called when traffic can start flowing again 13274 * @pdev: Pointer to PCI device 13275 * 13276 * This callback is called when the error recovery driver tells us that 13277 * its OK to resume normal operation. 13278 */ 13279static void bnx2x_io_resume(struct pci_dev *pdev) 13280{ 13281 struct net_device *dev = pci_get_drvdata(pdev); 13282 struct bnx2x *bp = netdev_priv(dev); 13283 13284 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { 13285 netdev_err(bp->dev, "Handling parity error recovery. 
Try again later\n"); 13286 return; 13287 } 13288 13289 rtnl_lock(); 13290 13291 bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) & 13292 DRV_MSG_SEQ_NUMBER_MASK; 13293 13294 if (netif_running(dev)) 13295 bnx2x_nic_load(bp, LOAD_NORMAL); 13296 13297 netif_device_attach(dev); 13298 13299 rtnl_unlock(); 13300} 13301 13302static const struct pci_error_handlers bnx2x_err_handler = { 13303 .error_detected = bnx2x_io_error_detected, 13304 .slot_reset = bnx2x_io_slot_reset, 13305 .resume = bnx2x_io_resume, 13306}; 13307 13308static void bnx2x_shutdown(struct pci_dev *pdev) 13309{ 13310 struct net_device *dev = pci_get_drvdata(pdev); 13311 struct bnx2x *bp; 13312 13313 if (!dev) 13314 return; 13315 13316 bp = netdev_priv(dev); 13317 if (!bp) 13318 return; 13319 13320 rtnl_lock(); 13321 netif_device_detach(dev); 13322 rtnl_unlock(); 13323 13324 /* Don't remove the netdevice, as there are scenarios which will cause 13325 * the kernel to hang, e.g., when trying to remove bnx2i while the 13326 * rootfs is mounted from SAN. 
13327 */ 13328 __bnx2x_remove(pdev, dev, bp, false); 13329} 13330 13331static struct pci_driver bnx2x_pci_driver = { 13332 .name = DRV_MODULE_NAME, 13333 .id_table = bnx2x_pci_tbl, 13334 .probe = bnx2x_init_one, 13335 .remove = bnx2x_remove_one, 13336 .suspend = bnx2x_suspend, 13337 .resume = bnx2x_resume, 13338 .err_handler = &bnx2x_err_handler, 13339#ifdef CONFIG_BNX2X_SRIOV 13340 .sriov_configure = bnx2x_sriov_configure, 13341#endif 13342 .shutdown = bnx2x_shutdown, 13343}; 13344 13345static int __init bnx2x_init(void) 13346{ 13347 int ret; 13348 13349 pr_info("%s", version); 13350 13351 bnx2x_wq = create_singlethread_workqueue("bnx2x"); 13352 if (bnx2x_wq == NULL) { 13353 pr_err("Cannot create workqueue\n"); 13354 return -ENOMEM; 13355 } 13356 13357 ret = pci_register_driver(&bnx2x_pci_driver); 13358 if (ret) { 13359 pr_err("Cannot register driver\n"); 13360 destroy_workqueue(bnx2x_wq); 13361 } 13362 return ret; 13363} 13364 13365static void __exit bnx2x_cleanup(void) 13366{ 13367 struct list_head *pos, *q; 13368 13369 pci_unregister_driver(&bnx2x_pci_driver); 13370 13371 destroy_workqueue(bnx2x_wq); 13372 13373 /* Free globally allocated resources */ 13374 list_for_each_safe(pos, q, &bnx2x_prev_list) { 13375 struct bnx2x_prev_path_list *tmp = 13376 list_entry(pos, struct bnx2x_prev_path_list, list); 13377 list_del(pos); 13378 kfree(tmp); 13379 } 13380} 13381 13382void bnx2x_notify_link_changed(struct bnx2x *bp) 13383{ 13384 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1); 13385} 13386 13387module_init(bnx2x_init); 13388module_exit(bnx2x_cleanup); 13389 13390/** 13391 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s). 13392 * 13393 * @bp: driver handle 13394 * @set: set or clear the CAM entry 13395 * 13396 * This function will wait until the ramrod completion returns. 13397 * Return 0 if success, -ENODEV if ramrod doesn't return. 
13398 */ 13399static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp) 13400{ 13401 unsigned long ramrod_flags = 0; 13402 13403 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 13404 return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac, 13405 &bp->iscsi_l2_mac_obj, true, 13406 BNX2X_ISCSI_ETH_MAC, &ramrod_flags); 13407} 13408 13409/* count denotes the number of new completions we have seen */ 13410static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) 13411{ 13412 struct eth_spe *spe; 13413 int cxt_index, cxt_offset; 13414 13415#ifdef BNX2X_STOP_ON_ERROR 13416 if (unlikely(bp->panic)) 13417 return; 13418#endif 13419 13420 spin_lock_bh(&bp->spq_lock); 13421 BUG_ON(bp->cnic_spq_pending < count); 13422 bp->cnic_spq_pending -= count; 13423 13424 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) { 13425 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type) 13426 & SPE_HDR_CONN_TYPE) >> 13427 SPE_HDR_CONN_TYPE_SHIFT; 13428 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data) 13429 >> SPE_HDR_CMD_ID_SHIFT) & 0xff; 13430 13431 /* Set validation for iSCSI L2 client before sending SETUP 13432 * ramrod 13433 */ 13434 if (type == ETH_CONNECTION_TYPE) { 13435 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) { 13436 cxt_index = BNX2X_ISCSI_ETH_CID(bp) / 13437 ILT_PAGE_CIDS; 13438 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) - 13439 (cxt_index * ILT_PAGE_CIDS); 13440 bnx2x_set_ctx_validation(bp, 13441 &bp->context[cxt_index]. 13442 vcxt[cxt_offset].eth, 13443 BNX2X_ISCSI_ETH_CID(bp)); 13444 } 13445 } 13446 13447 /* 13448 * There may be not more than 8 L2, not more than 8 L5 SPEs 13449 * and in the air. We also check that number of outstanding 13450 * COMMON ramrods is not more than the EQ and SPQ can 13451 * accommodate. 
13452 */ 13453 if (type == ETH_CONNECTION_TYPE) { 13454 if (!atomic_read(&bp->cq_spq_left)) 13455 break; 13456 else 13457 atomic_dec(&bp->cq_spq_left); 13458 } else if (type == NONE_CONNECTION_TYPE) { 13459 if (!atomic_read(&bp->eq_spq_left)) 13460 break; 13461 else 13462 atomic_dec(&bp->eq_spq_left); 13463 } else if ((type == ISCSI_CONNECTION_TYPE) || 13464 (type == FCOE_CONNECTION_TYPE)) { 13465 if (bp->cnic_spq_pending >= 13466 bp->cnic_eth_dev.max_kwqe_pending) 13467 break; 13468 else 13469 bp->cnic_spq_pending++; 13470 } else { 13471 BNX2X_ERR("Unknown SPE type: %d\n", type); 13472 bnx2x_panic(); 13473 break; 13474 } 13475 13476 spe = bnx2x_sp_get_next(bp); 13477 *spe = *bp->cnic_kwq_cons; 13478 13479 DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n", 13480 bp->cnic_spq_pending, bp->cnic_kwq_pending, count); 13481 13482 if (bp->cnic_kwq_cons == bp->cnic_kwq_last) 13483 bp->cnic_kwq_cons = bp->cnic_kwq; 13484 else 13485 bp->cnic_kwq_cons++; 13486 } 13487 bnx2x_sp_prod_update(bp); 13488 spin_unlock_bh(&bp->spq_lock); 13489} 13490 13491static int bnx2x_cnic_sp_queue(struct net_device *dev, 13492 struct kwqe_16 *kwqes[], u32 count) 13493{ 13494 struct bnx2x *bp = netdev_priv(dev); 13495 int i; 13496 13497#ifdef BNX2X_STOP_ON_ERROR 13498 if (unlikely(bp->panic)) { 13499 BNX2X_ERR("Can't post to SP queue while panic\n"); 13500 return -EIO; 13501 } 13502#endif 13503 13504 if ((bp->recovery_state != BNX2X_RECOVERY_DONE) && 13505 (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) { 13506 BNX2X_ERR("Handling parity error recovery. 
Try again later\n"); 13507 return -EAGAIN; 13508 } 13509 13510 spin_lock_bh(&bp->spq_lock); 13511 13512 for (i = 0; i < count; i++) { 13513 struct eth_spe *spe = (struct eth_spe *)kwqes[i]; 13514 13515 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT) 13516 break; 13517 13518 *bp->cnic_kwq_prod = *spe; 13519 13520 bp->cnic_kwq_pending++; 13521 13522 DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n", 13523 spe->hdr.conn_and_cmd_data, spe->hdr.type, 13524 spe->data.update_data_addr.hi, 13525 spe->data.update_data_addr.lo, 13526 bp->cnic_kwq_pending); 13527 13528 if (bp->cnic_kwq_prod == bp->cnic_kwq_last) 13529 bp->cnic_kwq_prod = bp->cnic_kwq; 13530 else 13531 bp->cnic_kwq_prod++; 13532 } 13533 13534 spin_unlock_bh(&bp->spq_lock); 13535 13536 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending) 13537 bnx2x_cnic_sp_post(bp, 0); 13538 13539 return i; 13540} 13541 13542static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl) 13543{ 13544 struct cnic_ops *c_ops; 13545 int rc = 0; 13546 13547 mutex_lock(&bp->cnic_mutex); 13548 c_ops = rcu_dereference_protected(bp->cnic_ops, 13549 lockdep_is_held(&bp->cnic_mutex)); 13550 if (c_ops) 13551 rc = c_ops->cnic_ctl(bp->cnic_data, ctl); 13552 mutex_unlock(&bp->cnic_mutex); 13553 13554 return rc; 13555} 13556 13557static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl) 13558{ 13559 struct cnic_ops *c_ops; 13560 int rc = 0; 13561 13562 rcu_read_lock(); 13563 c_ops = rcu_dereference(bp->cnic_ops); 13564 if (c_ops) 13565 rc = c_ops->cnic_ctl(bp->cnic_data, ctl); 13566 rcu_read_unlock(); 13567 13568 return rc; 13569} 13570 13571/* 13572 * for commands that have no data 13573 */ 13574int bnx2x_cnic_notify(struct bnx2x *bp, int cmd) 13575{ 13576 struct cnic_ctl_info ctl = {0}; 13577 13578 ctl.cmd = cmd; 13579 13580 return bnx2x_cnic_ctl_send(bp, &ctl); 13581} 13582 13583static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err) 13584{ 13585 struct cnic_ctl_info ctl = {0}; 13586 13587 
/* first we tell CNIC and only then we count this as a completion */ 13588 ctl.cmd = CNIC_CTL_COMPLETION_CMD; 13589 ctl.data.comp.cid = cid; 13590 ctl.data.comp.error = err; 13591 13592 bnx2x_cnic_ctl_send_bh(bp, &ctl); 13593 bnx2x_cnic_sp_post(bp, 0); 13594} 13595 13596/* Called with netif_addr_lock_bh() taken. 13597 * Sets an rx_mode config for an iSCSI ETH client. 13598 * Doesn't block. 13599 * Completion should be checked outside. 13600 */ 13601static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start) 13602{ 13603 unsigned long accept_flags = 0, ramrod_flags = 0; 13604 u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); 13605 int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED; 13606 13607 if (start) { 13608 /* Start accepting on iSCSI L2 ring. Accept all multicasts 13609 * because it's the only way for UIO Queue to accept 13610 * multicasts (in non-promiscuous mode only one Queue per 13611 * function will receive multicast packets (leading in our 13612 * case). 
13613 */ 13614 __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags); 13615 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags); 13616 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags); 13617 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); 13618 13619 /* Clear STOP_PENDING bit if START is requested */ 13620 clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state); 13621 13622 sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED; 13623 } else 13624 /* Clear START_PENDING bit if STOP is requested */ 13625 clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state); 13626 13627 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) 13628 set_bit(sched_state, &bp->sp_state); 13629 else { 13630 __set_bit(RAMROD_RX, &ramrod_flags); 13631 bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0, 13632 ramrod_flags); 13633 } 13634} 13635 13636static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) 13637{ 13638 struct bnx2x *bp = netdev_priv(dev); 13639 int rc = 0; 13640 13641 switch (ctl->cmd) { 13642 case DRV_CTL_CTXTBL_WR_CMD: { 13643 u32 index = ctl->data.io.offset; 13644 dma_addr_t addr = ctl->data.io.dma_addr; 13645 13646 bnx2x_ilt_wr(bp, index, addr); 13647 break; 13648 } 13649 13650 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: { 13651 int count = ctl->data.credit.credit_count; 13652 13653 bnx2x_cnic_sp_post(bp, count); 13654 break; 13655 } 13656 13657 /* rtnl_lock is held. 
*/ 13658 case DRV_CTL_START_L2_CMD: { 13659 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 13660 unsigned long sp_bits = 0; 13661 13662 /* Configure the iSCSI classification object */ 13663 bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj, 13664 cp->iscsi_l2_client_id, 13665 cp->iscsi_l2_cid, BP_FUNC(bp), 13666 bnx2x_sp(bp, mac_rdata), 13667 bnx2x_sp_mapping(bp, mac_rdata), 13668 BNX2X_FILTER_MAC_PENDING, 13669 &bp->sp_state, BNX2X_OBJ_TYPE_RX, 13670 &bp->macs_pool); 13671 13672 /* Set iSCSI MAC address */ 13673 rc = bnx2x_set_iscsi_eth_mac_addr(bp); 13674 if (rc) 13675 break; 13676 13677 mmiowb(); 13678 barrier(); 13679 13680 /* Start accepting on iSCSI L2 ring */ 13681 13682 netif_addr_lock_bh(dev); 13683 bnx2x_set_iscsi_eth_rx_mode(bp, true); 13684 netif_addr_unlock_bh(dev); 13685 13686 /* bits to wait on */ 13687 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits); 13688 __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits); 13689 13690 if (!bnx2x_wait_sp_comp(bp, sp_bits)) 13691 BNX2X_ERR("rx_mode completion timed out!\n"); 13692 13693 break; 13694 } 13695 13696 /* rtnl_lock is held. 
*/ 13697 case DRV_CTL_STOP_L2_CMD: { 13698 unsigned long sp_bits = 0; 13699 13700 /* Stop accepting on iSCSI L2 ring */ 13701 netif_addr_lock_bh(dev); 13702 bnx2x_set_iscsi_eth_rx_mode(bp, false); 13703 netif_addr_unlock_bh(dev); 13704 13705 /* bits to wait on */ 13706 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits); 13707 __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits); 13708 13709 if (!bnx2x_wait_sp_comp(bp, sp_bits)) 13710 BNX2X_ERR("rx_mode completion timed out!\n"); 13711 13712 mmiowb(); 13713 barrier(); 13714 13715 /* Unset iSCSI L2 MAC */ 13716 rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj, 13717 BNX2X_ISCSI_ETH_MAC, true); 13718 break; 13719 } 13720 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: { 13721 int count = ctl->data.credit.credit_count; 13722 13723 smp_mb__before_atomic_inc(); 13724 atomic_add(count, &bp->cq_spq_left); 13725 smp_mb__after_atomic_inc(); 13726 break; 13727 } 13728 case DRV_CTL_ULP_REGISTER_CMD: { 13729 int ulp_type = ctl->data.register_data.ulp_type; 13730 13731 if (CHIP_IS_E3(bp)) { 13732 int idx = BP_FW_MB_IDX(bp); 13733 u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]); 13734 int path = BP_PATH(bp); 13735 int port = BP_PORT(bp); 13736 int i; 13737 u32 scratch_offset; 13738 u32 *host_addr; 13739 13740 /* first write capability to shmem2 */ 13741 if (ulp_type == CNIC_ULP_ISCSI) 13742 cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI; 13743 else if (ulp_type == CNIC_ULP_FCOE) 13744 cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE; 13745 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap); 13746 13747 if ((ulp_type != CNIC_ULP_FCOE) || 13748 (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) || 13749 (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES))) 13750 break; 13751 13752 /* if reached here - should write fcoe capabilities */ 13753 scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr); 13754 if (!scratch_offset) 13755 break; 13756 scratch_offset += offsetof(struct glob_ncsi_oem_data, 13757 fcoe_features[path][port]); 13758 host_addr = (u32 *) &(ctl->data.register_data. 
13759 fcoe_features); 13760 for (i = 0; i < sizeof(struct fcoe_capabilities); 13761 i += 4) 13762 REG_WR(bp, scratch_offset + i, 13763 *(host_addr + i/4)); 13764 } 13765 break; 13766 } 13767 13768 case DRV_CTL_ULP_UNREGISTER_CMD: { 13769 int ulp_type = ctl->data.ulp_type; 13770 13771 if (CHIP_IS_E3(bp)) { 13772 int idx = BP_FW_MB_IDX(bp); 13773 u32 cap; 13774 13775 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]); 13776 if (ulp_type == CNIC_ULP_ISCSI) 13777 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI; 13778 else if (ulp_type == CNIC_ULP_FCOE) 13779 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE; 13780 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap); 13781 } 13782 break; 13783 } 13784 13785 default: 13786 BNX2X_ERR("unknown command %x\n", ctl->cmd); 13787 rc = -EINVAL; 13788 } 13789 13790 return rc; 13791} 13792 13793void bnx2x_setup_cnic_irq_info(struct bnx2x *bp) 13794{ 13795 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 13796 13797 if (bp->flags & USING_MSIX_FLAG) { 13798 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX; 13799 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX; 13800 cp->irq_arr[0].vector = bp->msix_table[1].vector; 13801 } else { 13802 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX; 13803 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX; 13804 } 13805 if (!CHIP_IS_E1x(bp)) 13806 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb; 13807 else 13808 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb; 13809 13810 cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp); 13811 cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp); 13812 cp->irq_arr[1].status_blk = bp->def_status_blk; 13813 cp->irq_arr[1].status_blk_num = DEF_SB_ID; 13814 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID; 13815 13816 cp->num_irq = 2; 13817} 13818 13819void bnx2x_setup_cnic_info(struct bnx2x *bp) 13820{ 13821 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 13822 13823 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 13824 bnx2x_cid_ilt_lines(bp); 13825 cp->starting_cid = 
bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; 13826 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp); 13827 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp); 13828 13829 DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n", 13830 BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid, 13831 cp->iscsi_l2_cid); 13832 13833 if (NO_ISCSI_OOO(bp)) 13834 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; 13835} 13836 13837static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops, 13838 void *data) 13839{ 13840 struct bnx2x *bp = netdev_priv(dev); 13841 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 13842 int rc; 13843 13844 DP(NETIF_MSG_IFUP, "Register_cnic called\n"); 13845 13846 if (ops == NULL) { 13847 BNX2X_ERR("NULL ops received\n"); 13848 return -EINVAL; 13849 } 13850 13851 if (!CNIC_SUPPORT(bp)) { 13852 BNX2X_ERR("Can't register CNIC when not supported\n"); 13853 return -EOPNOTSUPP; 13854 } 13855 13856 if (!CNIC_LOADED(bp)) { 13857 rc = bnx2x_load_cnic(bp); 13858 if (rc) { 13859 BNX2X_ERR("CNIC-related load failed\n"); 13860 return rc; 13861 } 13862 } 13863 13864 bp->cnic_enabled = true; 13865 13866 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL); 13867 if (!bp->cnic_kwq) 13868 return -ENOMEM; 13869 13870 bp->cnic_kwq_cons = bp->cnic_kwq; 13871 bp->cnic_kwq_prod = bp->cnic_kwq; 13872 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT; 13873 13874 bp->cnic_spq_pending = 0; 13875 bp->cnic_kwq_pending = 0; 13876 13877 bp->cnic_data = data; 13878 13879 cp->num_irq = 0; 13880 cp->drv_state |= CNIC_DRV_STATE_REGD; 13881 cp->iro_arr = bp->iro_arr; 13882 13883 bnx2x_setup_cnic_irq_info(bp); 13884 13885 rcu_assign_pointer(bp->cnic_ops, ops); 13886 13887 return 0; 13888} 13889 13890static int bnx2x_unregister_cnic(struct net_device *dev) 13891{ 13892 struct bnx2x *bp = netdev_priv(dev); 13893 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 13894 13895 mutex_lock(&bp->cnic_mutex); 13896 cp->drv_state = 0; 
13897 RCU_INIT_POINTER(bp->cnic_ops, NULL); 13898 mutex_unlock(&bp->cnic_mutex); 13899 synchronize_rcu(); 13900 bp->cnic_enabled = false; 13901 kfree(bp->cnic_kwq); 13902 bp->cnic_kwq = NULL; 13903 13904 return 0; 13905} 13906 13907static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev) 13908{ 13909 struct bnx2x *bp = netdev_priv(dev); 13910 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 13911 13912 /* If both iSCSI and FCoE are disabled - return NULL in 13913 * order to indicate CNIC that it should not try to work 13914 * with this device. 13915 */ 13916 if (NO_ISCSI(bp) && NO_FCOE(bp)) 13917 return NULL; 13918 13919 cp->drv_owner = THIS_MODULE; 13920 cp->chip_id = CHIP_ID(bp); 13921 cp->pdev = bp->pdev; 13922 cp->io_base = bp->regview; 13923 cp->io_base2 = bp->doorbells; 13924 cp->max_kwqe_pending = 8; 13925 cp->ctx_blk_size = CDU_ILT_PAGE_SZ; 13926 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 13927 bnx2x_cid_ilt_lines(bp); 13928 cp->ctx_tbl_len = CNIC_ILT_LINES; 13929 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; 13930 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue; 13931 cp->drv_ctl = bnx2x_drv_ctl; 13932 cp->drv_register_cnic = bnx2x_register_cnic; 13933 cp->drv_unregister_cnic = bnx2x_unregister_cnic; 13934 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp); 13935 cp->iscsi_l2_client_id = 13936 bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); 13937 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp); 13938 13939 if (NO_ISCSI_OOO(bp)) 13940 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; 13941 13942 if (NO_ISCSI(bp)) 13943 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI; 13944 13945 if (NO_FCOE(bp)) 13946 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE; 13947 13948 BNX2X_DEV_INFO( 13949 "page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n", 13950 cp->ctx_blk_size, 13951 cp->ctx_tbl_offset, 13952 cp->ctx_tbl_len, 13953 cp->starting_cid); 13954 return cp; 13955} 13956 13957static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp) 13958{ 13959 
struct bnx2x *bp = fp->bp; 13960 u32 offset = BAR_USTRORM_INTMEM; 13961 13962 if (IS_VF(bp)) 13963 return bnx2x_vf_ustorm_prods_offset(bp, fp); 13964 else if (!CHIP_IS_E1x(bp)) 13965 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); 13966 else 13967 offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id); 13968 13969 return offset; 13970} 13971 13972/* called only on E1H or E2. 13973 * When pretending to be PF, the pretend value is the function number 0...7 13974 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID 13975 * combination 13976 */ 13977int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val) 13978{ 13979 u32 pretend_reg; 13980 13981 if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX) 13982 return -1; 13983 13984 /* get my own pretend register */ 13985 pretend_reg = bnx2x_get_pretend_reg(bp); 13986 REG_WR(bp, pretend_reg, pretend_func_val); 13987 REG_RD(bp, pretend_reg); 13988 return 0; 13989} 13990