bnx2x_cmn.h revision 51c1a580b1e07d58eb063f9f4a70aea8ad32fe23
/* bnx2x_cmn.h: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#ifndef BNX2X_CMN_H
#define BNX2X_CMN_H

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include "bnx2x.h"

/* This is used as a replacement for an MCP if it's not present */
extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */

extern int num_queues;

/************************ Macros ********************************/
#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			kfree((void *)x); \
			x = NULL; \
		} \
	} while (0)

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset((void *)x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = kzalloc(size, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
	} while (0)
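
/*
 * Usage sketch (illustrative only): the allocation macros rely on two
 * conventions in the calling function -- a local 'struct bnx2x *bp' and an
 * 'alloc_mem_err' label to unwind to on failure. The names below are
 * hypothetical, not part of the driver:
 *
 *	void *ring;
 *	dma_addr_t ring_mapping;
 *
 *	BNX2X_PCI_ALLOC(ring, &ring_mapping, BCM_PAGE_SIZE);
 *	...
 *	return 0;
 *
 * alloc_mem_err:
 *	BNX2X_PCI_FREE(ring, ring_mapping, BCM_PAGE_SIZE);
 *	return -ENOMEM;
 */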

/*********************** Interfaces ****************************
 *  Functions that need to be implemented by each driver version
 */
/* Init */

/**
 * bnx2x_send_unload_req - request unload mode from the MCP.
 *
 * @bp: driver handle
 * @unload_mode: requested function's unload mode
 *
 * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
 */
u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);

/**
 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
 *
 * @bp: driver handle
 */
void bnx2x_send_unload_done(struct bnx2x *bp);

/**
 * bnx2x_config_rss_pf - configure RSS parameters.
 *
 * @bp: driver handle
 * @ind_table: indirection table to configure
 * @config_hash: re-configure RSS hash keys configuration
 */
int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash);

/**
 * bnx2x__init_func_obj - init function object
 *
 * @bp: driver handle
 *
 * Initializes the Function Object with the appropriate
 * parameters which include a function slow path driver
 * interface.
 */
void bnx2x__init_func_obj(struct bnx2x *bp);

/**
 * bnx2x_setup_queue - setup eth queue.
 *
 * @bp: driver handle
 * @fp: pointer to the fastpath structure
 * @leading: boolean
 */
int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		      bool leading);

/**
 * bnx2x_setup_leading - bring up a leading eth queue.
 *
 * @bp: driver handle
 */
int bnx2x_setup_leading(struct bnx2x *bp);

/**
 * bnx2x_fw_command - send the MCP a request
 *
 * @bp: driver handle
 * @command: request
 * @param: request's parameter
 *
 * block until there is a reply
 */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);

/**
 * bnx2x_initial_phy_init - initialize link parameters structure variables.
 *
 * @bp: driver handle
 * @load_mode: current mode
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);

/**
 * bnx2x_link_set - configure hw according to link parameters structure.
 *
 * @bp: driver handle
 */
void bnx2x_link_set(struct bnx2x *bp);

/**
 * bnx2x_link_test - query link status.
 *
 * @bp: driver handle
 * @is_serdes: bool
 *
 * Returns 0 if link is UP.
 */
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);

/**
 * bnx2x_drv_pulse - write driver pulse to shmem
 *
 * @bp: driver handle
 *
 * writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox
 * in the shmem.
 */
void bnx2x_drv_pulse(struct bnx2x *bp);

/**
 * bnx2x_igu_ack_sb - update IGU with current SB value
 *
 * @bp: driver handle
 * @igu_sb_id: SB id
 * @segment: SB segment
 * @index: SB index
 * @op: SB operation
 * @update: is HW update required
 */
void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
		      u16 index, u8 op, u8 update);

/* Disable transactions from chip to host */
void bnx2x_pf_disable(struct bnx2x *bp);

/**
 * bnx2x__link_status_update - handles link status change.
 *
 * @bp: driver handle
 */
void bnx2x__link_status_update(struct bnx2x *bp);

/**
 * bnx2x_link_report - report link status to upper layer.
 *
 * @bp: driver handle
 */
void bnx2x_link_report(struct bnx2x *bp);

/* Non-atomic version of bnx2x_link_report() */
void __bnx2x_link_report(struct bnx2x *bp);

/**
 * bnx2x_get_mf_speed - calculate MF speed.
 *
 * @bp: driver handle
 *
 * Takes into account current linespeed and MF configuration.
 */
u16 bnx2x_get_mf_speed(struct bnx2x *bp);

/**
 * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler
 *
 * @irq: irq number
 * @dev_instance: private instance
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);

/**
 * bnx2x_interrupt - non MSI-X interrupt handler
 *
 * @irq: irq number
 * @dev_instance: private instance
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
#ifdef BCM_CNIC

/**
 * bnx2x_cnic_notify - send command to cnic driver
 *
 * @bp: driver handle
 * @cmd: command
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);

/**
 * bnx2x_setup_cnic_irq_info - provides cnic with IRQ information
 *
 * @bp: driver handle
 */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
#endif

/**
 * bnx2x_int_enable - enable HW interrupts.
 *
 * @bp: driver handle
 */
void bnx2x_int_enable(struct bnx2x *bp);

/**
 * bnx2x_int_disable_sync - disable interrupts.
 *
 * @bp: driver handle
 * @disable_hw: if true, disable HW interrupts as well
 *
 * This function ensures that no ISRs or SP DPCs (sp_task)
 * are running after it returns.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);

/**
 * bnx2x_nic_init - init driver internals.
 *
 * @bp: driver handle
 * @load_code: COMMON, PORT or FUNCTION
 *
 * Initializes:
 *  - rings
 *  - status blocks
 *  - etc.
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);

/**
 * bnx2x_alloc_mem - allocate driver's memory.
 *
 * @bp: driver handle
 */
int bnx2x_alloc_mem(struct bnx2x *bp);

/**
 * bnx2x_free_mem - release driver's memory.
 *
 * @bp: driver handle
 */
void bnx2x_free_mem(struct bnx2x *bp);

/**
 * bnx2x_set_num_queues - set number of queues according to mode.
 *
 * @bp: driver handle
 */
void bnx2x_set_num_queues(struct bnx2x *bp);

/**
 * bnx2x_chip_cleanup - cleanup chip internals.
 *
 * @bp: driver handle
 * @unload_mode: COMMON, PORT, FUNCTION
 *
 * - Cleanup MAC configuration.
 * - Closes clients.
 * - etc.
 */
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);

/**
 * bnx2x_acquire_hw_lock - acquire HW lock.
 *
 * @bp: driver handle
 * @resource: resource bit which was locked
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * bnx2x_release_hw_lock - release HW lock.
 *
 * @bp: driver handle
 * @resource: resource bit which was locked
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * bnx2x_release_leader_lock - release recovery leader lock
 *
 * @bp: driver handle
 */
int bnx2x_release_leader_lock(struct bnx2x *bp);

/**
 * bnx2x_set_eth_mac - configure eth MAC address in the HW
 *
 * @bp: driver handle
 * @set: set or clear
 *
 * Configures according to the value in netdev->dev_addr.
 */
int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);

/**
 * bnx2x_set_rx_mode - set MAC filtering configurations.
 *
 * @dev: netdevice
 *
 * called with netif_tx_lock from dev_mcast.c
 * If bp->state is OPEN, should be called with
 * netif_addr_lock_bh()
 */
void bnx2x_set_rx_mode(struct net_device *dev);

/**
 * bnx2x_set_storm_rx_mode - configure MAC filtering rules in the FW.
 *
 * @bp: driver handle
 *
 * If bp->state is OPEN, should be called with
 * netif_addr_lock_bh().
 */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp);

/**
 * bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
 *
 * @bp: driver handle
 * @cl_id: client id
 * @rx_mode_flags: rx mode configuration
 * @rx_accept_flags: rx accept configuration
 * @tx_accept_flags: tx accept configuration (tx switch)
 * @ramrod_flags: ramrod configuration
 */
void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
			 unsigned long rx_mode_flags,
			 unsigned long rx_accept_flags,
			 unsigned long tx_accept_flags,
			 unsigned long ramrod_flags);

/* Parity errors related */
void bnx2x_set_pf_load(struct bnx2x *bp);
bool bnx2x_clear_pf_load(struct bnx2x *bp);
bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print);
bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
void bnx2x_set_reset_in_progress(struct bnx2x *bp);
void bnx2x_set_reset_global(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);

/**
 * bnx2x_sp_event - handle ramrods completion.
 *
 * @fp: fastpath handle for the event
 * @rr_cqe: eth_rx_cqe
 */
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);

/**
 * bnx2x_ilt_set_info - prepare ILT configurations.
 *
 * @bp: driver handle
 */
void bnx2x_ilt_set_info(struct bnx2x *bp);

/**
 * bnx2x_dcbx_init - initialize dcbx protocol.
 *
 * @bp: driver handle
 */
void bnx2x_dcbx_init(struct bnx2x *bp);

/**
 * bnx2x_set_power_state - set power state to the requested value.
 *
 * @bp: driver handle
 * @state: required state D0 or D3hot
 *
 * Currently only D0 and D3hot are supported.
 */
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);

/**
 * bnx2x_update_max_mf_config - update MAX part of MF configuration in HW.
 *
 * @bp: driver handle
 * @value: new value
 */
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);

/* Error handling */
void bnx2x_panic_dump(struct bnx2x *bp);

void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);

/* dev_close main block */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);

/* dev_open main block */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode);

/* hard_xmit callback */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);

/* setup_tc callback */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);

/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);

/* reload helper */
int bnx2x_reload_if_running(struct net_device *dev);

int bnx2x_change_mac_addr(struct net_device *dev, void *p);

/* NAPI poll Rx part */
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);

void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			  u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod);

/* NAPI poll Tx part */
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);

/* suspend/resume callbacks */
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
int bnx2x_resume(struct pci_dev *pdev);

/* Release IRQ vectors */
void bnx2x_free_irq(struct bnx2x *bp);

void bnx2x_free_fp_mem(struct bnx2x *bp);
int bnx2x_alloc_fp_mem(struct bnx2x *bp);
void bnx2x_init_rx_rings(struct bnx2x *bp);
void bnx2x_free_skbs(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_netif_start(struct bnx2x *bp);

/**
 * bnx2x_enable_msix - set msix configuration.
 *
 * @bp: driver handle
 *
 * fills msix_table, requests vectors, updates num_queues
 * according to number of available vectors.
 */
int bnx2x_enable_msix(struct bnx2x *bp);

/**
 * bnx2x_enable_msi - request MSI mode from the OS, update internals accordingly
 *
 * @bp: driver handle
 */
int bnx2x_enable_msi(struct bnx2x *bp);

/**
 * bnx2x_poll - NAPI callback
 *
 * @napi: napi structure
 * @budget: NAPI budget
 */
int bnx2x_poll(struct napi_struct *napi, int budget);

/**
 * bnx2x_alloc_mem_bp - allocate memory outside the main driver structure
 *
 * @bp: driver handle
 */
int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_free_mem_bp - release memory outside the main driver structure
 *
 * @bp: driver handle
 */
void bnx2x_free_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_change_mtu - change mtu netdev callback
 *
 * @dev: net device
 * @new_mtu: requested mtu
 */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu);

#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
/**
 * bnx2x_fcoe_get_wwn - return the requested WWN value for this port
 *
 * @dev: net_device
 * @wwn: output buffer
 * @type: WWN type: NETDEV_FCOE_WWNN (node) or NETDEV_FCOE_WWPN (port)
 */
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
#endif

netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features);
int bnx2x_set_features(struct net_device *dev, netdev_features_t features);

/**
 * bnx2x_tx_timeout - tx timeout netdev callback
 *
 * @dev: net device
 */
void bnx2x_tx_timeout(struct net_device *dev);

/*********************** Inlines **********************************/
/*********************** Fast path ********************************/
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	barrier(); /* status block is written to by the chip */
	fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}

static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp,
			struct bnx2x_fastpath *fp, u16 bd_prod,
			u16 rx_comp_prod, u16 rx_sge_prod, u32 start)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	u32 i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since the FW might read the BD/SGE right after the
	 * producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(rx_prods)/4; i++)
		REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
					u8 segment, u16 index, u8 op,
					u8 update, u32 igu_addr)
{
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr);
	REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
					  u8 idu_sb_id, bool is_Pf)
{
	u32 data, ctl, cnt = 100;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
	u32 sb_bit = 1 << (idu_sb_id%32);
	u32 func_encode = func | (is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;

	/* Not supported in BC mode */
	if (CHIP_INT_MODE_IS_BC(bp))
		return;

	data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
			<< IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
		IGU_REGULAR_CLEANUP_SET |
		IGU_REGULAR_BCLEANUP;

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
	      func_encode << IGU_CTRL_REG_FID_SHIFT |
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   data, igu_addr_data);
	REG_WR(bp, igu_addr_data, data);
	mmiowb();
	barrier();
	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();

	/* wait for clean up to finish */
	while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
		msleep(20);

	if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
		DP(NETIF_MSG_HW,
		   "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
		   idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
	}
}

static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
				   u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
				u16 index, u8 op, u8 update)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
	else {
		u8 segment;

		if (CHIP_INT_MODE_IS_BC(bp))
			segment = storm;
		else if (igu_sb_id != bp->igu_dsb_id)
			segment = IGU_SEG_ACCESS_DEF;
		else if (storm == ATTENTION_ID)
			segment = IGU_SEG_ACCESS_ATTN;
		else
			segment = IGU_SEG_ACCESS_DEF;
		bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
	}
}

static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	barrier();
	return result;
}

static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
	u32 result = REG_RD(bp, igu_addr);

	DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
	   result, igu_addr);

	barrier();
	return result;
}

static inline u16 bnx2x_ack_int(struct bnx2x *bp)
{
	barrier();
	if (bp->common.int_block == INT_BLOCK_HC)
		return bnx2x_hc_ack_int(bp);
	else
		return bnx2x_igu_ack_int(bp);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return txdata->tx_pkt_prod != txdata->tx_pkt_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x *bp,
				 struct bnx2x_fp_txdata *txdata)
{
	s16 used;
	u16 prod;
	u16 cons;

	prod = txdata->tx_bd_prod;
	cons = txdata->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries;
	 * it is used as a threshold.
	 */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > bp->tx_ring_size);
	WARN_ON((bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(bp->tx_ring_size) - used;
}
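
/*
 * Worked example (illustrative): on a freshly initialized ring, prod == cons,
 * so 'used' evaluates to NUM_TX_RINGS -- the "next-page" BDs are treated as
 * permanently consumed -- and bnx2x_tx_avail() reports
 * tx_ring_size - NUM_TX_RINGS usable BDs. SUB_S16() keeps the difference
 * correct when the 16-bit producer index wraps around below the consumer.
 */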

static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	return hw_cons != txdata->tx_pkt_cons;
}

static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u8 cos;
	for_each_cos_in_tx_queue(fp, cos)
		if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
			return true;
	return false;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}

/**
 * bnx2x_tx_disable - disables tx from stack point of view
 *
 * @bp: driver handle
 */
static inline void bnx2x_tx_disable(struct bnx2x *bp)
{
	netif_tx_disable(bp->dev);
	netif_carrier_off(bp->dev);
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, BNX2X_NAPI_WEIGHT);
}

static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
}

static inline void bnx2x_disable_msi(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;
	} else if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
}

static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	return num_queues ?
		min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
		min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp));
}

static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
			idx--;
		}
	}
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));

	/* Clear the last two indices in each page: these correspond to
	 * the "next" element, hence they will never be indicated and
	 * should be removed from the calculations.
	 */
	bnx2x_clear_sge_mask_next_elems(fp);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_data(struct bnx2x *bp,
				      struct bnx2x_fastpath *fp, u16 index)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		kfree(data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new buffer,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp,
				       u16 cons, u16 prod)
{
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));
	prod_rx_buf->data = cons_rx_buf->data;
	*prod_bd = *cons_bd;
}

/************************* Init ******************************************/

/**
 * bnx2x_func_start - init function
 *
 * @bp: driver handle
 *
 * Must be called before sending CLIENT_SETUP for the first client.
 */
static inline int bnx2x_func_start(struct bnx2x *bp)
{
	struct bnx2x_func_state_params func_params = {0};
	struct bnx2x_func_start_params *start_params =
		&func_params.params.start;

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_START;

	/* Function parameters */
	start_params->mf_mode = bp->mf_mode;
	start_params->sd_vlan_tag = bp->mf_ov;

	if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
		start_params->network_cos_mode = STATIC_COS;
	else /* CHIP_IS_E1X */
		start_params->network_cos_mode = FW_WRR;

	return bnx2x_func_state_change(bp, &func_params);
}

/**
 * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format
 *
 * @fw_hi: pointer to upper part
 * @fw_mid: pointer to middle part
 * @fw_lo: pointer to lower part
 * @mac: pointer to MAC address
 */
static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo,
					 u8 *mac)
{
	((u8 *)fw_hi)[0]  = mac[1];
	((u8 *)fw_hi)[1]  = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lo)[0]  = mac[5];
	((u8 *)fw_lo)[1]  = mac[4];
}
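
/*
 * Worked example (illustrative): for mac = 00:11:22:33:44:55 the bytes land
 * swapped in pairs, i.e. fw_hi = {0x11, 0x00}, fw_mid = {0x33, 0x22} and
 * fw_lo = {0x55, 0x44} when each u16 is viewed as a byte array. Writing
 * through byte pointers keeps the result independent of host endianness.
 */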

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	if (fp->disable_tpa)
		return;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		u8 *data = first_buf->data;

		if (data == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		kfree(data);
		first_buf->data = NULL;
	}
}

static inline void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
{
	int i;

	for (i = 1; i <= NUM_TX_RINGS; i++) {
		struct eth_tx_next_bd *tx_next_bd =
			&txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

		tx_next_bd->addr_hi =
			cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		tx_next_bd->addr_lo =
			cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
	}

	SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
	txdata->tx_db.data.zero_fill1 = 0;
	txdata->tx_db.data.prod = 0;

	txdata->tx_pkt_prod = 0;
	txdata->tx_pkt_cons = 0;
	txdata->tx_bd_prod = 0;
	txdata->tx_bd_cons = 0;
	txdata->tx_pkt = 0;
}

static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
{
	int i;
	u8 cos;

	for_each_tx_queue(bp, i)
		for_each_cos_in_tx_queue(&bp->fp[i], cos)
			bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]);
}

static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_RINGS; i++) {
		struct eth_rx_bd *rx_bd;

		rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
		rx_bd->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		rx_bd->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
	}
}

static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}

static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
{
	int i;
	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
		struct eth_rx_cqe_next_page *nextpg;

		nextpg = (struct eth_rx_cqe_next_page *)
			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
		nextpg->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		nextpg->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
	}
}

/* Returns the number of actually allocated BDs */
static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
				     int rx_ring_size)
{
	struct bnx2x *bp = fp->bp;
	u16 ring_prod, cqe_ring_prod;
	int i, failure_cnt = 0;

	fp->rx_comp_cons = 0;
	cqe_ring_prod = ring_prod = 0;

	/* This routine is called only during init, so
	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
	 */
	for (i = 0; i < rx_ring_size; i++) {
		if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
			failure_cnt++;
			continue;
		}
		ring_prod = NEXT_RX_IDX(ring_prod);
		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
		WARN_ON(ring_prod <= (i - failure_cnt));
	}

	if (failure_cnt)
		BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
			  i - failure_cnt, fp->index);

	fp->rx_bd_prod = ring_prod;
	/* Limit the CQE producer by the CQE ring size */
	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
				 cqe_ring_prod);
	fp->rx_pkt = fp->rx_calls = 0;

	fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt;

	return i - failure_cnt;
}

/* Statistics IDs are global per chip/path, while Client IDs for E1x are per
 * port.
 */
static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	if (!CHIP_IS_E1x(bp)) {
#ifdef BCM_CNIC
		/* there are special statistics counters for FCoE 136..140 */
		if (IS_FCOE_FP(fp))
			return bp->cnic_base_cl_id + (bp->pf_num >> 1);
#endif
		return fp->cl_id;
	}
	return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x;
}

static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
					       bnx2x_obj_type obj_type)
{
	struct bnx2x *bp = fp->bp;

	/* Configure classification DBs */
	bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid,
			   BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
			   bnx2x_sp_mapping(bp, mac_rdata),
			   BNX2X_FILTER_MAC_PENDING,
			   &bp->sp_state, obj_type,
			   &bp->macs_pool);
}

/**
 * bnx2x_get_path_func_num - get number of active functions
 *
 * @bp: driver handle
 *
 * Calculates the number of active (not hidden) functions on the
 * current path.
 */
static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
{
	u8 func_num = 0, i;

	/* 57710 has only one function per-port */
	if (CHIP_IS_E1(bp))
		return 1;

	/* Calculate a number of functions enabled on the current
	 * PATH/PORT.
	 */
	if (CHIP_REV_IS_SLOW(bp)) {
		if (IS_MF(bp))
			func_num = 4;
		else
			func_num = 2;
	} else {
		for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
			u32 func_config =
				MF_CFG_RD(bp,
					  func_mf_config[BP_PORT(bp) + 2 * i].
					  config);
			func_num +=
				((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
		}
	}

	WARN_ON(!func_num);

	return func_num;
}

static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
{
	/* RX_MODE controlling object */
	bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);

	/* multicast configuration controlling object */
	bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
			     BP_FUNC(bp), BP_FUNC(bp),
			     bnx2x_sp(bp, mcast_rdata),
			     bnx2x_sp_mapping(bp, mcast_rdata),
			     BNX2X_FILTER_MCAST_PENDING, &bp->sp_state,
			     BNX2X_OBJ_TYPE_RX);

	/* Setup CAM credit pools */
	bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
				   bnx2x_get_path_func_num(bp));

	/* RSS configuration object */
	bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
				  bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
				  bnx2x_sp(bp, rss_rdata),
				  bnx2x_sp_mapping(bp, rss_rdata),
				  BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
				  BNX2X_OBJ_TYPE_RX);
}

static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
{
	if (CHIP_IS_E1x(fp->bp))
		return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H;
	else
		return fp->cl_id;
}

static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;

	if (!CHIP_IS_E1x(bp))
		return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
	else
		return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
}

static inline void bnx2x_init_txdata(struct bnx2x *bp,
			struct bnx2x_fp_txdata *txdata, u32 cid,
			int txq_index, __le16 *tx_cons_sb)
{
	txdata->cid = cid;
	txdata->txq_index = txq_index;
	txdata->tx_cons_sb = tx_cons_sb;

	DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n",
	   txdata->cid, txdata->txq_index);
}

#ifdef BCM_CNIC
static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
{
	return bp->cnic_base_cl_id + cl_idx +
		(bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX;
}

static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
{
	/* the 'first' id is allocated for the cnic */
	return bp->base_fw_ndsb;
}

static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
{
	return bp->igu_base_sb;
}

static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
	unsigned long q_type = 0;

	bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
	bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
						     BNX2X_FCOE_ETH_CL_ID_IDX);
	/* The current BNX2X_FCOE_ETH_CID definition implies no more than
	 * 16 ETH clients per function when CNIC is enabled!
	 *
	 * Fix it ASAP!!!
	 */
	bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
	bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
	bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
	bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;

	bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]),
			  fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX);

	DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);

	/* qZone id equals to FW (per path) client id */
	bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
	/* init shortcut */
	bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
		bnx2x_rx_ustorm_prods_offset(fp);

	/* Configure Queue State object */
	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

	/* No multi-CoS for FCoE L2 client */
	BUG_ON(fp->max_cos != 1);

	bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1,
			     BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
			     bnx2x_sp_mapping(bp, q_rdata), q_type);

	DP(NETIF_MSG_IFUP,
	   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
	   fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
	   fp->igu_sb_id);
}
#endif

static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
				       struct bnx2x_fp_txdata *txdata)
{
	int cnt = 1000;

	while (bnx2x_has_tx_work_unload(txdata)) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for queue[%d]: txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n",
				  txdata->txq_index, txdata->tx_pkt_prod,
				  txdata->tx_pkt_cons);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
			return -EBUSY;
#else
			break;
#endif
		}
		cnt--;
		usleep_range(1000, 1000);
	}

	return 0;
}

int bnx2x_get_link_cfg_idx(struct bnx2x *bp);

static inline void __storm_memset_struct(struct bnx2x *bp,
					 u32 addr, size_t size, u32 *data)
{
	int i;
	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), data[i]);
}

static inline void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}
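
/*
 * Note: __storm_memset_struct() copies size/4 32-bit words, so it assumes
 * the structure size is a multiple of 4 bytes -- which holds for the FW
 * interface structures passed in by the storm_memset_*() wrappers above
 * and below.
 */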

static inline void storm_memset_cmng(struct bnx2x *bp,
				     struct cmng_struct_per_port *cmng,
				     u8 port)
{
	size_t size = sizeof(struct cmng_struct_per_port);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);

	__storm_memset_struct(bp, addr, size, (u32 *)cmng);
}

/**
 * bnx2x_wait_sp_comp - wait for the outstanding SP commands.
 *
 * @bp: driver handle
 * @mask: bits that need to be cleared
 */
static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
{
	int tout = 5000; /* Wait for 5 secs tops */

	while (tout--) {
		smp_mb();
		netif_addr_lock_bh(bp->dev);
		if (!(bp->sp_state & mask)) {
			netif_addr_unlock_bh(bp->dev);
			return true;
		}
		netif_addr_unlock_bh(bp->dev);

		usleep_range(1000, 1000);
	}

	smp_mb();

	netif_addr_lock_bh(bp->dev);
	if (bp->sp_state & mask) {
		BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, mask 0x%lx\n",
			  bp->sp_state, mask);
		netif_addr_unlock_bh(bp->dev);
		return false;
	}
	netif_addr_unlock_bh(bp->dev);

	return true;
}
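
/*
 * Typical usage (sketch, not a verbatim caller): a slow-path flow marks a
 * pending operation in bp->sp_state, posts the corresponding ramrod, and
 * then blocks until the completion path clears those bits; 'pending_bits'
 * below is a placeholder for the relevant BNX2X_FILTER_*_PENDING mask:
 *
 *	... post the ramrod ...
 *	if (!bnx2x_wait_sp_comp(bp, pending_bits))
 *		... handle the completion timeout ...
 */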

/**
 * bnx2x_set_ctx_validation - set CDU context validation values
 *
 * @bp: driver handle
 * @cxt: context of the connection on the host memory
 * @cid: SW CID of the connection to be configured
 */
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid);

void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec);
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);

/**
 * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration.
 *
 * @bp: driver handle
 * @mf_cfg: MF configuration
 */
static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
{
	u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
			      FUNC_MF_CFG_MAX_BW_SHIFT;
	if (!max_cfg) {
		DP(NETIF_MSG_IFUP | BNX2X_MSG_ETHTOOL,
		   "Max BW configured to 0 - using 100 instead\n");
		max_cfg = 100;
	}
	return max_cfg;
}

/* checks if HW supports GRO for given MTU */
static inline bool bnx2x_mtu_allows_gro(int mtu)
{
	/* gro frags per page */
	int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE);

	/*
	 * 1. number of frags should not grow above MAX_SKB_FRAGS
	 * 2. frag must fit the page
	 */
	return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
}

static inline bool bnx2x_need_gro_check(int mtu)
{
	return (SGE_PAGES / (mtu - ETH_MAX_TPA_HEADER_SIZE - 1)) !=
		(SGE_PAGES / (mtu - ETH_MIN_TPA_HEADER_SIZE + 1));
}

/**
 * bnx2x_bz_fp - zero content of the fastpath structure.
 *
 * @bp: driver handle
 * @index: fastpath index to be zeroed
 *
 * Makes sure the contents of bp->fp[index].napi are kept
 * intact.
 */
static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	struct napi_struct orig_napi = fp->napi;
	/* bzero bnx2x_fastpath contents */
	if (bp->stats_init)
		memset(fp, 0, sizeof(*fp));
	else {
		/* Keep Queue statistics */
		struct bnx2x_eth_q_stats *tmp_eth_q_stats;
		struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;

		tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
					  GFP_KERNEL);
		if (tmp_eth_q_stats)
			memcpy(tmp_eth_q_stats, &fp->eth_q_stats,
			       sizeof(struct bnx2x_eth_q_stats));

		tmp_eth_q_stats_old =
			kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
				GFP_KERNEL);
		if (tmp_eth_q_stats_old)
			memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old,
			       sizeof(struct bnx2x_eth_q_stats_old));

		memset(fp, 0, sizeof(*fp));

		if (tmp_eth_q_stats) {
			memcpy(&fp->eth_q_stats, tmp_eth_q_stats,
			       sizeof(struct bnx2x_eth_q_stats));
			kfree(tmp_eth_q_stats);
		}

		if (tmp_eth_q_stats_old) {
			memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old,
			       sizeof(struct bnx2x_eth_q_stats_old));
			kfree(tmp_eth_q_stats_old);
		}
	}

	/* Restore the NAPI object as it has been already initialized */
	fp->napi = orig_napi;

	fp->bp = bp;
	fp->index = index;
	if (IS_ETH_FP(fp))
		fp->max_cos = bp->max_cos;
	else
		/* Special queues support only one CoS */
		fp->max_cos = 1;

	/*
	 * set the tpa flag for each queue. The tpa flag determines the queue
	 * minimal size so it must be set prior to queue memory allocation
	 */
	fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
			    (bp->flags & GRO_ENABLE_FLAG &&
			     bnx2x_mtu_allows_gro(bp->dev->mtu)));
	if (bp->flags & TPA_ENABLE_FLAG)
		fp->mode = TPA_MODE_LRO;
	else if (bp->flags & GRO_ENABLE_FLAG)
		fp->mode = TPA_MODE_GRO;

#ifdef BCM_CNIC
	/* We don't want TPA on an FCoE L2 ring */
	if (IS_FCOE_FP(fp))
		fp->disable_tpa = 1;
#endif
}

/**
 * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
 *
 * @bp: driver handle
 */
void bnx2x_get_iscsi_info(struct bnx2x *bp);

/* returns func by VN for current port */
static inline int func_by_vn(struct bnx2x *bp, int vn)
{
	return 2 * vn + BP_PORT(bp);
}
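
/*
 * Worked example (illustrative): functions are interleaved across the two
 * ports, so on port 0 VNs 0..3 map to functions 0, 2, 4, 6, while on port 1
 * the same VNs map to functions 1, 3, 5, 7.
 */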

/**
 * bnx2x_link_sync_notify - send notification to other functions.
 *
 * @bp: driver handle
 */
static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
{
	int func;
	int vn;

	/* Set the attention towards other drivers on the same port */
	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
		if (vn == BP_VN(bp))
			continue;

		func = func_by_vn(bp, vn);
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
		       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
	}
}

/**
 * bnx2x_update_drv_flags - update flags in shmem
 *
 * @bp: driver handle
 * @flags: flags to update
 * @set: set or clear
 */
static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
{
	if (SHMEM2_HAS(bp, drv_flags)) {
		u32 drv_flags;
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
		drv_flags = SHMEM2_RD(bp, drv_flags);

		if (set)
			SET_FLAGS(drv_flags, flags);
		else
			RESET_FLAGS(drv_flags, flags);

		SHMEM2_WR(bp, drv_flags, drv_flags);
		DP(NETIF_MSG_IFUP, "drv_flags 0x%08x\n", drv_flags);
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
	}
}

static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
{
	if (is_valid_ether_addr(addr))
		return true;
#ifdef BCM_CNIC
	if (is_zero_ether_addr(addr) && IS_MF_ISCSI_SD(bp))
		return true;
#endif
	return false;
}

#endif /* BNX2X_CMN_H */