bnx2x_cmn.h revision 8ca5e17e58c953b9a9dbd4974c554b25c6d70b1a
/* bnx2x_cmn.h: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#ifndef BNX2X_CMN_H
#define BNX2X_CMN_H

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include "bnx2x.h"

/* This is used as a replacement for an MCP if it's not present */
extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */

extern int num_queues;
extern int int_mode;

/************************ Macros ********************************/
#define BNX2X_PCI_FREE(x, y, size) \
        do { \
                if (x) { \
                        dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
                        x = NULL; \
                        y = 0; \
                } \
        } while (0)

#define BNX2X_FREE(x) \
        do { \
                if (x) { \
                        kfree((void *)x); \
                        x = NULL; \
                } \
        } while (0)

#define BNX2X_PCI_ALLOC(x, y, size) \
        do { \
                x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
                if (x == NULL) \
                        goto alloc_mem_err; \
                memset((void *)x, 0, size); \
        } while (0)

#define BNX2X_ALLOC(x, size) \
        do { \
                x = kzalloc(size, GFP_KERNEL); \
                if (x == NULL) \
                        goto alloc_mem_err; \
        } while (0)
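
/*
 * Illustrative sketch (not part of this header): the alloc/free macros
 * above pair up around an `alloc_mem_err' label and rely on a local `bp'
 * pointer in the calling function. `ring', `ring_mapping' and `sw_ring'
 * are hypothetical fields used only for this example:
 *
 *      BNX2X_PCI_ALLOC(fp->ring, &fp->ring_mapping, ring_sz);
 *      BNX2X_ALLOC(fp->sw_ring, sw_ring_sz);
 *      return 0;
 *
 * alloc_mem_err:
 *      BNX2X_PCI_FREE(fp->ring, fp->ring_mapping, ring_sz);
 *      BNX2X_FREE(fp->sw_ring);
 *      return -ENOMEM;
 */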

/*********************** Interfaces ****************************
 *  Functions that need to be implemented by each driver version
 */
/* Init */

/**
 * bnx2x_send_unload_req - request unload mode from the MCP.
 *
 * @bp: driver handle
 * @unload_mode: requested function's unload mode
 *
 * Returns the unload mode granted by the MCP: COMMON, PORT or FUNC.
 */
u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);

/**
 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
 *
 * @bp: driver handle
 * @keep_link: true iff link should be kept up
 */
void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link);

/**
 * bnx2x_config_rss_pf - configure RSS parameters in a PF.
 *
 * @bp: driver handle
 * @rss_obj: RSS object to use
 * @config_hash: re-configure the RSS hash keys
 */
int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
                        bool config_hash);

/**
 * bnx2x__init_func_obj - init function object
 *
 * @bp: driver handle
 *
 * Initializes the Function Object with the appropriate
 * parameters which include a function slow path driver
 * interface.
 */
void bnx2x__init_func_obj(struct bnx2x *bp);

/**
 * bnx2x_setup_queue - setup eth queue.
 *
 * @bp: driver handle
 * @fp: pointer to the fastpath structure
 * @leading: true if this is the leading queue
 */
int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                      bool leading);

/**
 * bnx2x_setup_leading - bring up a leading eth queue.
 *
 * @bp: driver handle
 */
int bnx2x_setup_leading(struct bnx2x *bp);

/**
 * bnx2x_fw_command - send the MCP a request
 *
 * @bp: driver handle
 * @command: request
 * @param: request's parameter
 *
 * Blocks until there is a reply.
 */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
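
/*
 * Sketch of a typical bnx2x_fw_command() exchange (simplified from the
 * load path; the exact command codes live in the HSI headers, and a zero
 * reply means the MCP failed to respond):
 *
 *      u32 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
 *      if (!load_code)
 *              return -EBUSY;
 */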

/**
 * bnx2x_initial_phy_init - initialize link parameters structure variables.
 *
 * @bp: driver handle
 * @load_mode: current mode
 */
int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);

/**
 * bnx2x_link_set - configure hw according to link parameters structure.
 *
 * @bp: driver handle
 */
void bnx2x_link_set(struct bnx2x *bp);

/**
 * bnx2x_force_link_reset - force a link reset and put the PHY in reset as well.
 *
 * @bp: driver handle
 */
void bnx2x_force_link_reset(struct bnx2x *bp);

/**
 * bnx2x_link_test - query link status.
 *
 * @bp: driver handle
 * @is_serdes: true to test the SerDes link
 *
 * Returns 0 if link is UP.
 */
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);

/**
 * bnx2x_drv_pulse - write driver pulse to shmem
 *
 * @bp: driver handle
 *
 * writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox
 * in the shmem.
 */
void bnx2x_drv_pulse(struct bnx2x *bp);

/**
 * bnx2x_igu_ack_sb - update IGU with current SB value
 *
 * @bp: driver handle
 * @igu_sb_id: SB id
 * @segment: SB segment
 * @index: SB index
 * @op: SB operation
 * @update: is HW update required
 */
void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
                      u16 index, u8 op, u8 update);

/* Disable transactions from chip to host */
void bnx2x_pf_disable(struct bnx2x *bp);

/**
 * bnx2x__link_status_update - handles link status change.
 *
 * @bp: driver handle
 */
void bnx2x__link_status_update(struct bnx2x *bp);

/**
 * bnx2x_link_report - report link status to upper layer.
 *
 * @bp: driver handle
 */
void bnx2x_link_report(struct bnx2x *bp);

/* Non-atomic version of bnx2x_link_report() */
void __bnx2x_link_report(struct bnx2x *bp);

/**
 * bnx2x_get_mf_speed - calculate MF speed.
 *
 * @bp: driver handle
 *
 * Takes into account current linespeed and MF configuration.
 */
u16 bnx2x_get_mf_speed(struct bnx2x *bp);

/**
 * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler
 *
 * @irq: irq number
 * @dev_instance: private instance
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);

/**
 * bnx2x_interrupt - non MSI-X interrupt handler
 *
 * @irq: irq number
 * @dev_instance: private instance
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);

/**
 * bnx2x_cnic_notify - send command to cnic driver
 *
 * @bp: driver handle
 * @cmd: command
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);

/**
 * bnx2x_setup_cnic_irq_info - provides cnic with IRQ information
 *
 * @bp: driver handle
 */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);

/**
 * bnx2x_setup_cnic_info - provides cnic with updated info
 *
 * @bp: driver handle
 */
void bnx2x_setup_cnic_info(struct bnx2x *bp);

/**
 * bnx2x_int_enable - enable HW interrupts.
 *
 * @bp: driver handle
 */
void bnx2x_int_enable(struct bnx2x *bp);

/**
 * bnx2x_int_disable_sync - disable interrupts.
 *
 * @bp: driver handle
 * @disable_hw: if true, disable HW interrupts as well
 *
 * This function ensures that no ISRs or SP DPCs (sp_task) are
 * running after it returns.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);

/**
 * bnx2x_nic_init_cnic - init driver internals for cnic.
 *
 * @bp: driver handle
 *
 * Initializes:
 *  - rings
 *  - status blocks
 *  - etc.
 */
void bnx2x_nic_init_cnic(struct bnx2x *bp);

/**
 * bnx2x_nic_init - init driver internals.
 *
 * @bp: driver handle
 * @load_code: COMMON, PORT or FUNCTION
 *
 * Initializes:
 *  - rings
 *  - status blocks
 *  - etc.
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);

/**
 * bnx2x_alloc_mem_cnic - allocate driver's memory for cnic.
 *
 * @bp: driver handle
 */
int bnx2x_alloc_mem_cnic(struct bnx2x *bp);

/**
 * bnx2x_alloc_mem - allocate driver's memory.
 *
 * @bp: driver handle
 */
int bnx2x_alloc_mem(struct bnx2x *bp);

/**
 * bnx2x_free_mem_cnic - release driver's memory for cnic.
 *
 * @bp: driver handle
 */
void bnx2x_free_mem_cnic(struct bnx2x *bp);

/**
 * bnx2x_free_mem - release driver's memory.
 *
 * @bp: driver handle
 */
void bnx2x_free_mem(struct bnx2x *bp);

/**
 * bnx2x_set_num_queues - set number of queues according to mode.
 *
 * @bp: driver handle
 */
void bnx2x_set_num_queues(struct bnx2x *bp);

/**
 * bnx2x_chip_cleanup - cleanup chip internals.
 *
 * @bp: driver handle
 * @unload_mode: COMMON, PORT, FUNCTION
 * @keep_link: true iff link should be kept up.
 *
 * - Cleanup MAC configuration.
 * - Closes clients.
 * - etc.
 */
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link);

/**
 * bnx2x_acquire_hw_lock - acquire HW lock.
 *
 * @bp: driver handle
 * @resource: resource bit which was locked
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * bnx2x_release_hw_lock - release HW lock.
 *
 * @bp: driver handle
 * @resource: resource bit which was locked
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * bnx2x_release_leader_lock - release recovery leader lock
 *
 * @bp: driver handle
 */
int bnx2x_release_leader_lock(struct bnx2x *bp);

/**
 * bnx2x_set_eth_mac - configure eth MAC address in the HW
 *
 * @bp: driver handle
 * @set: set or clear
 *
 * Configures according to the value in netdev->dev_addr.
 */
int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);

/**
 * bnx2x_set_rx_mode - set MAC filtering configurations.
 *
 * @dev: netdevice
 *
 * called with netif_tx_lock from dev_mcast.c
 * If bp->state is OPEN, should be called with
 * netif_addr_lock_bh()
 */
void bnx2x_set_rx_mode(struct net_device *dev);

/**
 * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
 *
 * @bp: driver handle
 *
 * If bp->state is OPEN, should be called with
 * netif_addr_lock_bh().
 */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
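
/*
 * Sketch of the locking rule stated above: callers outside the ndo
 * rx-mode path take the address lock themselves while bp->state is OPEN,
 * e.g.
 *
 *      netif_addr_lock_bh(bp->dev);
 *      bnx2x_set_storm_rx_mode(bp);
 *      netif_addr_unlock_bh(bp->dev);
 */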

/**
 * bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
 *
 * @bp: driver handle
 * @cl_id: client id
 * @rx_mode_flags: rx mode configuration
 * @rx_accept_flags: rx accept configuration
 * @tx_accept_flags: tx accept configuration (tx switch)
 * @ramrod_flags: ramrod configuration
 */
void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
                         unsigned long rx_mode_flags,
                         unsigned long rx_accept_flags,
                         unsigned long tx_accept_flags,
                         unsigned long ramrod_flags);

/* Parity errors related */
void bnx2x_set_pf_load(struct bnx2x *bp);
bool bnx2x_clear_pf_load(struct bnx2x *bp);
bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print);
bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
void bnx2x_set_reset_in_progress(struct bnx2x *bp);
void bnx2x_set_reset_global(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);
int bnx2x_init_hw_func_cnic(struct bnx2x *bp);

/**
 * bnx2x_sp_event - handle ramrods completion.
 *
 * @fp: fastpath handle for the event
 * @rr_cqe: eth_rx_cqe
 */
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);

/**
 * bnx2x_ilt_set_info - prepare ILT configurations.
 *
 * @bp: driver handle
 */
void bnx2x_ilt_set_info(struct bnx2x *bp);

/**
 * bnx2x_ilt_set_info_cnic - prepare ILT configurations for SRC and TM.
 *
 * @bp: driver handle
 */
void bnx2x_ilt_set_info_cnic(struct bnx2x *bp);

/**
 * bnx2x_dcbx_init - initialize dcbx protocol.
 *
 * @bp: driver handle
 * @update_shmem: true to write the updated DCBX configuration back to shmem
 */
void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem);

/**
 * bnx2x_set_power_state - set power state to the requested value.
 *
 * @bp: driver handle
 * @state: required state D0 or D3hot
 *
 * Currently only D0 and D3hot are supported.
 */
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);

/**
 * bnx2x_update_max_mf_config - update MAX part of MF configuration in HW.
 *
 * @bp: driver handle
 * @value: new value
 */
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);

/* Error handling */
void bnx2x_panic_dump(struct bnx2x *bp);

void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);

/* validate that the correct fw is loaded */
bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err);

/* dev_close main block */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link);

/* dev_open main block */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode);

/* hard_xmit callback */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);

/* setup_tc callback */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);

/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct ustorm_eth_rx_producers rx_prods = {0};
        u32 i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        /* Make sure that the BD and SGE data is updated before updating the
         * producers since FW might read the BD/SGE right after the producer
         * is updated.
         * This is only applicable for weak-ordered memory model archs such
         * as IA-64. The following barrier is also mandatory since FW
         * assumes BDs must have buffers.
         */
        wmb();

        for (i = 0; i < sizeof(rx_prods)/4; i++)
                REG_WR(bp, fp->ustorm_rx_prods_offset + i*4,
                       ((u32 *)&rx_prods)[i]);

        mmiowb(); /* keep prod updates ordered */

        DP(NETIF_MSG_RX_STATUS,
           "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
           fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
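
/*
 * Call-site sketch (simplified from the rx fastpath): producers are
 * published once per pass, only after the rings have been refilled, so
 * the FW never sees a producer that points at an empty BD:
 *
 *      bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
 *                           fp->rx_sge_prod);
 */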

/* reload helper */
int bnx2x_reload_if_running(struct net_device *dev);

int bnx2x_change_mac_addr(struct net_device *dev, void *p);

/* NAPI poll Rx part */
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);

/* NAPI poll Tx part */
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);

/* suspend/resume callbacks */
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
int bnx2x_resume(struct pci_dev *pdev);

/* Release IRQ vectors */
void bnx2x_free_irq(struct bnx2x *bp);

void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
void bnx2x_free_fp_mem(struct bnx2x *bp);
int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
int bnx2x_alloc_fp_mem(struct bnx2x *bp);
void bnx2x_init_rx_rings(struct bnx2x *bp);
void bnx2x_init_rx_rings_cnic(struct bnx2x *bp);
void bnx2x_free_skbs_cnic(struct bnx2x *bp);
void bnx2x_free_skbs(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_netif_start(struct bnx2x *bp);
int bnx2x_load_cnic(struct bnx2x *bp);

/**
 * bnx2x_enable_msix - set msix configuration.
 *
 * @bp: driver handle
 *
 * Fills msix_table, requests vectors and updates num_queues
 * according to the number of available vectors.
 */
int bnx2x_enable_msix(struct bnx2x *bp);

/**
 * bnx2x_enable_msi - request MSI mode from OS, update internals accordingly
 *
 * @bp: driver handle
 */
int bnx2x_enable_msi(struct bnx2x *bp);

/**
 * bnx2x_poll - NAPI callback
 *
 * @napi: napi structure
 * @budget: NAPI budget (maximum number of packets to process)
 */
int bnx2x_poll(struct napi_struct *napi, int budget);

/**
 * bnx2x_alloc_mem_bp - allocate memory outside the main driver structure
 *
 * @bp: driver handle
 */
int bnx2x_alloc_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_free_mem_bp - release memory outside the main driver structure
 *
 * @bp: driver handle
 */
void bnx2x_free_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_change_mtu - change mtu netdev callback
 *
 * @dev: net device
 * @new_mtu: requested mtu
 */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu);

#ifdef NETDEV_FCOE_WWNN
/**
 * bnx2x_fcoe_get_wwn - return the requested WWN value for this port
 *
 * @dev: net_device
 * @wwn: output buffer
 * @type: WWN type: NETDEV_FCOE_WWNN (node) or NETDEV_FCOE_WWPN (port)
 */
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
#endif

netdev_features_t bnx2x_fix_features(struct net_device *dev,
                                     netdev_features_t features);
int bnx2x_set_features(struct net_device *dev, netdev_features_t features);

/**
 * bnx2x_tx_timeout - tx timeout netdev callback
 *
 * @dev: net device
 */
void bnx2x_tx_timeout(struct net_device *dev);

/*********************** Inlines **********************************/
/*********************** Fast path ********************************/
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        barrier(); /* status block is written to by the chip */
        fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}

static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
                                        u8 segment, u16 index, u8 op,
                                        u8 update, u32 igu_addr)
{
        struct igu_regular cmd_data = {0};

        cmd_data.sb_id_and_flags =
                        ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
                         (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
                         (update << IGU_REGULAR_BUPDATE_SHIFT) |
                         (op << IGU_REGULAR_ENABLE_INT_SHIFT));

        DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
           cmd_data.sb_id_and_flags, igu_addr);
        REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}

static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
                                   u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
                                u16 index, u8 op, u8 update)
{
        if (bp->common.int_block == INT_BLOCK_HC)
                bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
        else {
                u8 segment;

                if (CHIP_INT_MODE_IS_BC(bp))
                        segment = storm;
                else if (igu_sb_id != bp->igu_dsb_id)
                        segment = IGU_SEG_ACCESS_NORM;
                else if (storm == ATTENTION_ID)
                        segment = IGU_SEG_ACCESS_ATTN;
                else
                        segment = IGU_SEG_ACCESS_DEF;
                bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
        }
}

static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        barrier();
        return result;
}

static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
{
        u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
        u32 result = REG_RD(bp, igu_addr);

        DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
           result, igu_addr);

        barrier();
        return result;
}

static inline u16 bnx2x_ack_int(struct bnx2x *bp)
{
        barrier();
        if (bp->common.int_block == INT_BLOCK_HC)
                return bnx2x_hc_ack_int(bp);
        else
                return bnx2x_igu_ack_int(bp);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return txdata->tx_pkt_prod != txdata->tx_pkt_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x *bp,
                                 struct bnx2x_fp_txdata *txdata)
{
        s16 used;
        u16 prod;
        u16 cons;

        prod = txdata->tx_bd_prod;
        cons = txdata->tx_bd_cons;

        used = SUB_S16(prod, cons);

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > txdata->tx_ring_size);
        WARN_ON((txdata->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(txdata->tx_ring_size) - used;
}

static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata)
{
        u16 hw_cons;

        /* Tell compiler that status block fields can change */
        barrier();
        hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
        return hw_cons != txdata->tx_pkt_cons;
}

static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u8 cos;

        for_each_cos_in_tx_queue(fp, cos)
                if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
                        return true;
        return false;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
        u16 rx_cons_sb;

        /* Tell compiler that status block fields can change */
        barrier();
        rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
        if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                rx_cons_sb++;
        return (fp->rx_comp_cons != rx_cons_sb);
}
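
/*
 * Sketch of how the work-detection helpers above drive the NAPI loop
 * (simplified from bnx2x_poll()):
 *
 *      for_each_cos_in_tx_queue(fp, cos)
 *              if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
 *                      bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
 *      if (bnx2x_has_rx_work(fp))
 *              work_done += bnx2x_rx_int(fp, budget - work_done);
 */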

/**
 * bnx2x_tx_disable - disables tx from the stack's point of view
 *
 * @bp: driver handle
 */
static inline void bnx2x_tx_disable(struct bnx2x *bp)
{
        netif_tx_disable(bp->dev);
        netif_carrier_off(bp->dev);
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
{
        int i;

        /* Add NAPI objects */
        for_each_rx_queue_cnic(bp, i)
                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
                               bnx2x_poll, BNX2X_NAPI_WEIGHT);
}

static inline void bnx2x_add_all_napi(struct bnx2x *bp)
{
        int i;

        /* Add NAPI objects */
        for_each_eth_queue(bp, i)
                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
                               bnx2x_poll, BNX2X_NAPI_WEIGHT);
}

static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
{
        int i;

        for_each_rx_queue_cnic(bp, i)
                netif_napi_del(&bnx2x_fp(bp, i, napi));
}

static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
        int i;

        for_each_eth_queue(bp, i)
                netif_napi_del(&bnx2x_fp(bp, i, napi));
}

int bnx2x_set_int_mode(struct bnx2x *bp);

static inline void bnx2x_disable_msi(struct bnx2x *bp)
{
        if (bp->flags & USING_MSIX_FLAG) {
                pci_disable_msix(bp->pdev);
                bp->flags &= ~(USING_MSIX_FLAG | USING_SINGLE_MSIX_FLAG);
        } else if (bp->flags & USING_MSI_FLAG) {
                pci_disable_msi(bp->pdev);
                bp->flags &= ~USING_MSI_FLAG;
        }
}

static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
{
        return num_queues ?
                 min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
                 min_t(int, netif_get_num_default_rss_queues(),
                       BNX2X_MAX_QUEUES(bp));
}

static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
                        idx--;
                }
        }
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));

        /* Clear the last two indices in each page: these are the indices
         * that correspond to the "next" element, hence will never be
         * indicated and should be removed from the calculations.
         */
        bnx2x_clear_sge_mask_next_elems(fp);
}
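
/*
 * Worked note on the two SGE mask helpers above: every ring page holds
 * RX_SGE_CNT entries whose last two slots point at the next page, so for
 * each page i the bits RX_SGE_CNT * i - 1 and RX_SGE_CNT * i - 2 are
 * cleared; those slots never carry buffers and must stay out of the
 * producer calculations.
 */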

/* Note that we are not allocating a new buffer, we are just moving one
 * from cons to prod. We are not creating a new mapping, so there is no
 * need to check for dma_mapping_error().
 */
static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp,
                                       u16 cons, u16 prod)
{
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        dma_unmap_addr_set(prod_rx_buf, mapping,
                           dma_unmap_addr(cons_rx_buf, mapping));
        prod_rx_buf->data = cons_rx_buf->data;
        *prod_bd = *cons_bd;
}

/************************* Init ******************************************/

/* returns func by VN for current port */
static inline int func_by_vn(struct bnx2x *bp, int vn)
{
        return 2 * vn + BP_PORT(bp);
}

static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash)
{
        return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, config_hash);
}

/**
 * bnx2x_func_start - init function
 *
 * @bp: driver handle
 *
 * Must be called before sending CLIENT_SETUP for the first client.
 */
static inline int bnx2x_func_start(struct bnx2x *bp)
{
        struct bnx2x_func_state_params func_params = {NULL};
        struct bnx2x_func_start_params *start_params =
                &func_params.params.start;

        /* Prepare parameters for function state transitions */
        __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

        func_params.f_obj = &bp->func_obj;
        func_params.cmd = BNX2X_F_CMD_START;

        /* Function parameters */
        start_params->mf_mode = bp->mf_mode;
        start_params->sd_vlan_tag = bp->mf_ov;

        if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
                start_params->network_cos_mode = STATIC_COS;
        else /* CHIP_IS_E1X */
                start_params->network_cos_mode = FW_WRR;

        return bnx2x_func_state_change(bp, &func_params);
}

/**
 * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format
 *
 * @fw_hi: pointer to upper part
 * @fw_mid: pointer to middle part
 * @fw_lo: pointer to lower part
 * @mac: pointer to MAC address
 */
static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo,
                                         u8 *mac)
{
        ((u8 *)fw_hi)[0] = mac[1];
        ((u8 *)fw_hi)[1] = mac[0];
        ((u8 *)fw_mid)[0] = mac[3];
        ((u8 *)fw_mid)[1] = mac[2];
        ((u8 *)fw_lo)[0] = mac[5];
        ((u8 *)fw_lo)[1] = mac[4];
}
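
/*
 * Worked example for bnx2x_set_fw_mac_addr() with the hypothetical MAC
 * 00:10:18:ab:cd:ef: the bytes of each pair are swapped per 16-bit word,
 * so on a little-endian host fw_hi reads back 0x0010, fw_mid 0x18ab and
 * fw_lo 0xcdef.
 */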

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        if (fp->disable_tpa)
                return;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
{
        int i;

        for (i = 1; i <= NUM_RX_RINGS; i++) {
                struct eth_rx_bd *rx_bd;

                rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
                rx_bd->addr_hi =
                        cpu_to_le32(U64_HI(fp->rx_desc_mapping +
                                    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
                rx_bd->addr_lo =
                        cpu_to_le32(U64_LO(fp->rx_desc_mapping +
                                    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
        }
}

/* Statistics IDs are global per chip/path, while Client IDs for E1x
 * are per port.
 */
static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;

        if (!CHIP_IS_E1x(bp)) {
                /* there are special statistics counters for FCoE 136..140 */
                if (IS_FCOE_FP(fp))
                        return bp->cnic_base_cl_id + (bp->pf_num >> 1);
                return fp->cl_id;
        }
        return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x;
}

static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
                                               bnx2x_obj_type obj_type)
{
        struct bnx2x *bp = fp->bp;

        /* Configure classification DBs */
        bnx2x_init_mac_obj(bp, &bnx2x_sp_obj(bp, fp).mac_obj, fp->cl_id,
                           fp->cid, BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
                           bnx2x_sp_mapping(bp, mac_rdata),
                           BNX2X_FILTER_MAC_PENDING,
                           &bp->sp_state, obj_type,
                           &bp->macs_pool);
}

/**
 * bnx2x_get_path_func_num - get number of active functions
 *
 * @bp: driver handle
 *
 * Calculates the number of active (not hidden) functions on the
 * current path.
 */
static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
{
        u8 func_num = 0, i;

        /* 57710 has only one function per-port */
        if (CHIP_IS_E1(bp))
                return 1;

        /* Calculate the number of functions enabled on the current
         * PATH/PORT.
         */
        if (CHIP_REV_IS_SLOW(bp)) {
                if (IS_MF(bp))
                        func_num = 4;
                else
                        func_num = 2;
        } else {
                for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
                        u32 func_config =
                                MF_CFG_RD(bp,
                                          func_mf_config[BP_PORT(bp) + 2 * i].
                                          config);
                        func_num +=
                                ((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
                }
        }

        WARN_ON(!func_num);

        return func_num;
}

static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
{
        /* RX_MODE controlling object */
        bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);

        /* multicast configuration controlling object */
        bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
                             BP_FUNC(bp), BP_FUNC(bp),
                             bnx2x_sp(bp, mcast_rdata),
                             bnx2x_sp_mapping(bp, mcast_rdata),
                             BNX2X_FILTER_MCAST_PENDING, &bp->sp_state,
                             BNX2X_OBJ_TYPE_RX);

        /* Setup CAM credit pools */
        bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
                                   bnx2x_get_path_func_num(bp));

        bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_ABS_FUNC(bp)>>1,
                                    bnx2x_get_path_func_num(bp));

        /* RSS configuration object */
        bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
                                  bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
                                  bnx2x_sp(bp, rss_rdata),
                                  bnx2x_sp_mapping(bp, rss_rdata),
                                  BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
                                  BNX2X_OBJ_TYPE_RX);
}

static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
{
        if (CHIP_IS_E1x(fp->bp))
                return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H;
        else
                return fp->cl_id;
}

static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;
        u32 offset = BAR_USTRORM_INTMEM;

        if (IS_VF(bp))
                return PXP_VF_ADDR_USDM_QUEUES_START +
                        bp->acquire_resp.resc.hw_qid[fp->index] *
                        sizeof(struct ustorm_queue_zone_data);
        else if (!CHIP_IS_E1x(bp))
                offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
        else
                offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);

        return offset;
}

static inline void bnx2x_init_txdata(struct bnx2x *bp,
                                     struct bnx2x_fp_txdata *txdata, u32 cid,
                                     int txq_index, __le16 *tx_cons_sb,
                                     struct bnx2x_fastpath *fp)
{
        txdata->cid = cid;
        txdata->txq_index = txq_index;
        txdata->tx_cons_sb = tx_cons_sb;
        txdata->parent_fp = fp;
        txdata->tx_ring_size = IS_FCOE_FP(fp) ? MAX_TX_AVAIL : bp->tx_ring_size;

        DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n",
           txdata->cid, txdata->txq_index);
}

static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
{
        return bp->cnic_base_cl_id + cl_idx +
                (bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX;
}

static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
{
        /* the 'first' id is allocated for the cnic */
        return bp->base_fw_ndsb;
}

static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
{
        return bp->igu_base_sb;
}

static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
{
        struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
        unsigned long q_type = 0;

        bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
        bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
                                                     BNX2X_FCOE_ETH_CL_ID_IDX);
        bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
        bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
        bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
        bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
        bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
                          fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
                          fp);

        DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);

        /* qZone id equals to FW (per path) client id */
        bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
        /* init shortcut */
        bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
                bnx2x_rx_ustorm_prods_offset(fp);

        /* Configure Queue State object */
        __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
        __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

        /* No multi-CoS for FCoE L2 client */
        BUG_ON(fp->max_cos != 1);

        bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
                             &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
                             bnx2x_sp_mapping(bp, q_rdata), q_type);

        DP(NETIF_MSG_IFUP,
           "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
           fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
           fp->igu_sb_id);
}

static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
                                       struct bnx2x_fp_txdata *txdata)
{
        int cnt = 1000;

        while (bnx2x_has_tx_work_unload(txdata)) {
                if (!cnt) {
                        BNX2X_ERR("timeout waiting for queue[%d]: txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n",
                                  txdata->txq_index, txdata->tx_pkt_prod,
                                  txdata->tx_pkt_cons);
#ifdef BNX2X_STOP_ON_ERROR
                        bnx2x_panic();
                        return -EBUSY;
#else
                        break;
#endif
                }
                cnt--;
                usleep_range(1000, 1000);
        }

        return 0;
}

int bnx2x_get_link_cfg_idx(struct bnx2x *bp);

static inline void __storm_memset_struct(struct bnx2x *bp,
                                         u32 addr, size_t size, u32 *data)
{
        int i;

        for (i = 0; i < size/4; i++)
                REG_WR(bp, addr + (i * 4), data[i]);
}
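
/*
 * Usage sketch for __storm_memset_struct(): it mirrors a host structure
 * into storm internal memory one dword at a time, so the size is assumed
 * to be a multiple of 4 (as with the producer loop in
 * bnx2x_update_rx_prod() above), e.g.
 *
 *      struct ustorm_eth_rx_producers rx_prods = {0};
 *      ...
 *      __storm_memset_struct(bp, addr, sizeof(rx_prods), (u32 *)&rx_prods);
 */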

/**
 * bnx2x_wait_sp_comp - wait for the outstanding SP commands.
 *
 * @bp: driver handle
 * @mask: bits that need to be cleared
 */
static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
{
        int tout = 5000; /* Wait for 5 secs tops */

        while (tout--) {
                smp_mb();
                netif_addr_lock_bh(bp->dev);
                if (!(bp->sp_state & mask)) {
                        netif_addr_unlock_bh(bp->dev);
                        return true;
                }
                netif_addr_unlock_bh(bp->dev);

                usleep_range(1000, 1000);
        }

        smp_mb();

        netif_addr_lock_bh(bp->dev);
        if (bp->sp_state & mask) {
                BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, mask 0x%lx\n",
                          bp->sp_state, mask);
                netif_addr_unlock_bh(bp->dev);
                return false;
        }
        netif_addr_unlock_bh(bp->dev);

        return true;
}

/**
 * bnx2x_set_ctx_validation - set CDU context validation values
 *
 * @bp: driver handle
 * @cxt: context of the connection on the host memory
 * @cid: SW CID of the connection to be configured
 */
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
                              u32 cid);

void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
                                    u8 sb_index, u8 disable, u16 usec);
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);

/**
 * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration.
 *
 * @bp: driver handle
 * @mf_cfg: MF configuration
 */
static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
{
        u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
                      FUNC_MF_CFG_MAX_BW_SHIFT;

        if (!max_cfg) {
                DP(NETIF_MSG_IFUP | BNX2X_MSG_ETHTOOL,
                   "Max BW configured to 0 - using 100 instead\n");
                max_cfg = 100;
        }
        return max_cfg;
}

/* checks if HW supports GRO for given MTU */
static inline bool bnx2x_mtu_allows_gro(int mtu)
{
        /* gro frags per page */
        int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE);

        /*
         * 1. number of frags should not grow above MAX_SKB_FRAGS
         * 2. frag must fit the page
         */
        return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
}
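
/*
 * Worked example for bnx2x_mtu_allows_gro(), assuming a 4K SGE page and
 * a 1500 byte MTU: fpp = 4096 / (1500 - ETH_MAX_TPA_HEADER_SIZE) frags
 * fit in one page, and GRO is allowed only while U_ETH_SGL_SIZE * fpp
 * does not exceed MAX_SKB_FRAGS and the MTU itself fits a single page.
 */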

/**
 * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
 *
 * @bp: driver handle
 */
void bnx2x_get_iscsi_info(struct bnx2x *bp);

/**
 * bnx2x_link_sync_notify - send notification to other functions.
 *
 * @bp: driver handle
 */
static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
{
        int func;
        int vn;

        /* Set the attention towards other drivers on the same port */
        for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
                if (vn == BP_VN(bp))
                        continue;

                func = func_by_vn(bp, vn);
                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
                       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
        }
}

/**
 * bnx2x_update_drv_flags - update flags in shmem
 *
 * @bp: driver handle
 * @flags: flags to update
 * @set: set or clear
 */
static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
{
        if (SHMEM2_HAS(bp, drv_flags)) {
                u32 drv_flags;

                bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
                drv_flags = SHMEM2_RD(bp, drv_flags);

                if (set)
                        SET_FLAGS(drv_flags, flags);
                else
                        RESET_FLAGS(drv_flags, flags);

                SHMEM2_WR(bp, drv_flags, drv_flags);
                DP(NETIF_MSG_IFUP, "drv_flags 0x%08x\n", drv_flags);
                bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
        }
}

static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
{
        if (is_valid_ether_addr(addr) ||
            (is_zero_ether_addr(addr) &&
             (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))))
                return true;

        return false;
}

/**
 * bnx2x_fill_fw_str - fill buffer with FW version string.
 *
 * @bp: driver handle
 * @buf: character buffer to fill with the fw name
 * @buf_len: length of the above buffer
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);

#endif /* BNX2X_CMN_H */