bnx2x_cmn.h revision 3cdeec22e40264e40d34d8242b4ce7461329a80a
/* bnx2x_cmn.h: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#ifndef BNX2X_CMN_H
#define BNX2X_CMN_H

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include "bnx2x.h"
#include "bnx2x_sriov.h"

/* This is used as a replacement for an MCP if it's not present */
extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */

extern int num_queues;
extern int int_mode;

/************************ Macros ********************************/
#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			kfree((void *)x); \
			x = NULL; \
		} \
	} while (0)

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, \
				       GFP_KERNEL | __GFP_ZERO); \
		if (x == NULL) \
			goto alloc_mem_err; \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = kzalloc(size, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
	} while (0)
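
/* Illustrative only: a minimal sketch of how the allocation macros above are
 * meant to be used.  They expect an enclosing function with a "bp" in scope
 * and an "alloc_mem_err" label; the helper and the example_* fields below are
 * hypothetical and not part of this header.  The error path relies on the
 * fields starting out zeroed, so pointers that were never allocated are NULL
 * and the free macros skip them.
 *
 *	static int bnx2x_alloc_example(struct bnx2x *bp, int len)
 *	{
 *		BNX2X_ALLOC(bp->example_sw_ring,
 *			    sizeof(*bp->example_sw_ring) * len);
 *		BNX2X_PCI_ALLOC(bp->example_hw_ring, &bp->example_mapping,
 *				sizeof(struct eth_rx_bd) * len);
 *		return 0;
 *
 *	alloc_mem_err:
 *		BNX2X_FREE(bp->example_sw_ring);
 *		BNX2X_PCI_FREE(bp->example_hw_ring, bp->example_mapping,
 *			       sizeof(struct eth_rx_bd) * len);
 *		return -ENOMEM;
 *	}
 */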

/*********************** Interfaces ****************************
 *  Functions that need to be implemented by each driver version
 */
/* Init */

/**
 * bnx2x_send_unload_req - request unload mode from the MCP.
 *
 * @bp:			driver handle
 * @unload_mode:	requested function's unload mode
 *
 * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
 */
u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);

/**
 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
 *
 * @bp:		driver handle
 * @keep_link:	true iff link should be kept up
 */
void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link);

/**
 * bnx2x_config_rss_pf - configure RSS parameters in a PF.
 *
 * @bp:			driver handle
 * @rss_obj:		RSS object to use
 * @config_hash:	re-configure RSS hash keys configuration
 */
int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
			bool config_hash);

/**
 * bnx2x__init_func_obj - init function object
 *
 * @bp:		driver handle
 *
 * Initializes the Function Object with the appropriate
 * parameters which include a function slow path driver
 * interface.
 */
void bnx2x__init_func_obj(struct bnx2x *bp);

/**
 * bnx2x_setup_queue - setup eth queue.
 *
 * @bp:		driver handle
 * @fp:		pointer to the fastpath structure
 * @leading:	boolean
 */
int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		      bool leading);

/**
 * bnx2x_setup_leading - bring up a leading eth queue.
 *
 * @bp:		driver handle
 */
int bnx2x_setup_leading(struct bnx2x *bp);

/**
 * bnx2x_fw_command - send the MCP a request
 *
 * @bp:		driver handle
 * @command:	request
 * @param:	request's parameter
 *
 * Blocks until there is a reply.
 */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);

/**
 * bnx2x_initial_phy_init - initialize link parameters structure variables.
 *
 * @bp:		driver handle
 * @load_mode:	current mode
 */
int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);

/**
 * bnx2x_link_set - configure hw according to link parameters structure.
 *
 * @bp:		driver handle
 */
void bnx2x_link_set(struct bnx2x *bp);

/**
 * bnx2x_force_link_reset - forces a link reset and puts the PHY
 * in reset as well.
 *
 * @bp:		driver handle
 */
void bnx2x_force_link_reset(struct bnx2x *bp);

/**
 * bnx2x_link_test - query link status.
 *
 * @bp:		driver handle
 * @is_serdes:	bool
 *
 * Returns 0 if link is UP.
 */
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);

/**
 * bnx2x_drv_pulse - write driver pulse to shmem
 *
 * @bp:		driver handle
 *
 * writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox
 * in the shmem.
 */
void bnx2x_drv_pulse(struct bnx2x *bp);

/**
 * bnx2x_igu_ack_sb - update IGU with current SB value
 *
 * @bp:		driver handle
 * @igu_sb_id:	SB id
 * @segment:	SB segment
 * @index:	SB index
 * @op:		SB operation
 * @update:	is HW update required
 */
void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
		      u16 index, u8 op, u8 update);

/* Disable transactions from chip to host */
void bnx2x_pf_disable(struct bnx2x *bp);
int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val);

/**
 * bnx2x__link_status_update - handles link status change.
 *
 * @bp:		driver handle
 */
void bnx2x__link_status_update(struct bnx2x *bp);

/**
 * bnx2x_link_report - report link status to upper layer.
 *
 * @bp:		driver handle
 */
void bnx2x_link_report(struct bnx2x *bp);

/* Non-atomic version of bnx2x_link_report() */
void __bnx2x_link_report(struct bnx2x *bp);

/**
 * bnx2x_get_mf_speed - calculate MF speed.
 *
 * @bp:		driver handle
 *
 * Takes into account current linespeed and MF configuration.
 */
u16 bnx2x_get_mf_speed(struct bnx2x *bp);

/**
 * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler
 *
 * @irq:		irq number
 * @dev_instance:	private instance
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);

/**
 * bnx2x_interrupt - non MSI-X interrupt handler
 *
 * @irq:		irq number
 * @dev_instance:	private instance
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);

/**
 * bnx2x_cnic_notify - send command to cnic driver
 *
 * @bp:		driver handle
 * @cmd:	command
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);

/**
 * bnx2x_setup_cnic_irq_info - provides cnic with IRQ information
 *
 * @bp:		driver handle
 */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);

/**
 * bnx2x_setup_cnic_info - provides cnic with updated info
 *
 * @bp:		driver handle
 */
void bnx2x_setup_cnic_info(struct bnx2x *bp);

/**
 * bnx2x_int_enable - enable HW interrupts.
 *
 * @bp:		driver handle
 */
void bnx2x_int_enable(struct bnx2x *bp);

/**
 * bnx2x_int_disable_sync - disable interrupts.
 *
 * @bp:		driver handle
 * @disable_hw:	true, disable HW interrupts.
 *
 * This function ensures that no ISRs or SP DPCs (sp_task) are
 * running after it returns.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);

/**
 * bnx2x_nic_init_cnic - init driver internals for cnic.
 *
 * @bp:		driver handle
 *
 * Initializes:
 *  - rings
 *  - status blocks
 *  - etc.
 */
void bnx2x_nic_init_cnic(struct bnx2x *bp);

/**
 * bnx2x_pre_irq_nic_init - init driver internals.
 *
 * @bp:		driver handle
 *
 * Initializes:
 *  - fastpath object
 *  - fastpath rings
 *  - etc.
 */
void bnx2x_pre_irq_nic_init(struct bnx2x *bp);

/**
 * bnx2x_post_irq_nic_init - init driver internals.
 *
 * @bp:		driver handle
 * @load_code:	COMMON, PORT or FUNCTION
 *
 * Initializes:
 *  - status blocks
 *  - slowpath rings
 *  - etc.
 */
void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code);

/**
 * bnx2x_alloc_mem_cnic - allocate driver's memory for cnic.
 *
 * @bp:		driver handle
 */
int bnx2x_alloc_mem_cnic(struct bnx2x *bp);

/**
 * bnx2x_alloc_mem - allocate driver's memory.
 *
 * @bp:		driver handle
 */
int bnx2x_alloc_mem(struct bnx2x *bp);

/**
 * bnx2x_free_mem_cnic - release driver's memory for cnic.
 *
 * @bp:		driver handle
 */
void bnx2x_free_mem_cnic(struct bnx2x *bp);

/**
 * bnx2x_free_mem - release driver's memory.
 *
 * @bp:		driver handle
 */
void bnx2x_free_mem(struct bnx2x *bp);

/**
 * bnx2x_set_num_queues - set number of queues according to mode.
 *
 * @bp:		driver handle
 */
void bnx2x_set_num_queues(struct bnx2x *bp);

/**
 * bnx2x_chip_cleanup - cleanup chip internals.
 *
 * @bp:			driver handle
 * @unload_mode:	COMMON, PORT, FUNCTION
 * @keep_link:		true iff link should be kept up.
 *
 * - Cleanup MAC configuration.
 * - Closes clients.
 * - etc.
 */
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link);

/**
 * bnx2x_acquire_hw_lock - acquire HW lock.
 *
 * @bp:		driver handle
 * @resource:	resource bit which was locked
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * bnx2x_release_hw_lock - release HW lock.
 *
 * @bp:		driver handle
 * @resource:	resource bit which was locked
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * bnx2x_release_leader_lock - release recovery leader lock
 *
 * @bp:		driver handle
 */
int bnx2x_release_leader_lock(struct bnx2x *bp);

/**
 * bnx2x_set_eth_mac - configure eth MAC address in the HW
 *
 * @bp:		driver handle
 * @set:	set or clear
 *
 * Configures according to the value in netdev->dev_addr.
 */
int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);

/**
 * bnx2x_set_rx_mode - set MAC filtering configurations.
 *
 * @dev:	netdevice
 *
 * called with netif_tx_lock from dev_mcast.c
 * If bp->state is OPEN, should be called with
 * netif_addr_lock_bh()
 */
void bnx2x_set_rx_mode(struct net_device *dev);
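
/* Illustrative only: the locking expectation from the comment above, sketched
 * as a hypothetical caller re-applying the RX filters while the device is
 * OPEN.  Only bnx2x_set_rx_mode() itself is declared by this header.
 *
 *	netif_addr_lock_bh(bp->dev);
 *	bnx2x_set_rx_mode(bp->dev);
 *	netif_addr_unlock_bh(bp->dev);
 */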

/**
 * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
 *
 * @bp:		driver handle
 *
 * If bp->state is OPEN, should be called with
 * netif_addr_lock_bh().
 */
int bnx2x_set_storm_rx_mode(struct bnx2x *bp);

/**
 * bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
 *
 * @bp:			driver handle
 * @cl_id:		client id
 * @rx_mode_flags:	rx mode configuration
 * @rx_accept_flags:	rx accept configuration
 * @tx_accept_flags:	tx accept configuration (tx switch)
 * @ramrod_flags:	ramrod configuration
 */
int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
			unsigned long rx_mode_flags,
			unsigned long rx_accept_flags,
			unsigned long tx_accept_flags,
			unsigned long ramrod_flags);

/* Parity errors related */
void bnx2x_set_pf_load(struct bnx2x *bp);
bool bnx2x_clear_pf_load(struct bnx2x *bp);
bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print);
bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
void bnx2x_set_reset_in_progress(struct bnx2x *bp);
void bnx2x_set_reset_global(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);
int bnx2x_init_hw_func_cnic(struct bnx2x *bp);

/**
 * bnx2x_sp_event - handle ramrod completion.
 *
 * @fp:		fastpath handle for the event
 * @rr_cqe:	eth_rx_cqe
 */
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);

/**
 * bnx2x_ilt_set_info - prepare ILT configurations.
 *
 * @bp:		driver handle
 */
void bnx2x_ilt_set_info(struct bnx2x *bp);

/**
 * bnx2x_ilt_set_info_cnic - prepare ILT configurations for SRC and TM.
 *
 * @bp:		driver handle
 */
void bnx2x_ilt_set_info_cnic(struct bnx2x *bp);

/**
 * bnx2x_dcbx_init - initialize dcbx protocol.
 *
 * @bp:			driver handle
 * @update_shmem:	whether to update shmem data as well
 */
void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem);

/**
 * bnx2x_set_power_state - set power state to the requested value.
 *
 * @bp:		driver handle
 * @state:	required state D0 or D3hot
 *
 * Currently only D0 and D3hot are supported.
 */
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);

/**
 * bnx2x_update_max_mf_config - update MAX part of MF configuration in HW.
 *
 * @bp:		driver handle
 * @value:	new value
 */
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
/* Error handling */
void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);

/* dev_close main block */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link);

/* dev_open main block */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode);

/* hard_xmit callback */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);

/* setup_tc callback */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);

int bnx2x_get_vf_config(struct net_device *dev, int vf,
			struct ifla_vf_info *ivi);
int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);

/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	u32 i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/* Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(rx_prods)/4; i++)
		REG_WR(bp, fp->ustorm_rx_prods_offset + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

/* reload helper */
int bnx2x_reload_if_running(struct net_device *dev);

int bnx2x_change_mac_addr(struct net_device *dev, void *p);

/* NAPI poll Rx part */
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);

/* NAPI poll Tx part */
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);

/* suspend/resume callbacks */
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
int bnx2x_resume(struct pci_dev *pdev);

/* Release IRQ vectors */
void bnx2x_free_irq(struct bnx2x *bp);

void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
void bnx2x_free_fp_mem(struct bnx2x *bp);
int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
int bnx2x_alloc_fp_mem(struct bnx2x *bp);
void bnx2x_init_rx_rings(struct bnx2x *bp);
void bnx2x_init_rx_rings_cnic(struct bnx2x *bp);
void bnx2x_free_skbs_cnic(struct bnx2x *bp);
void bnx2x_free_skbs(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_netif_start(struct bnx2x *bp);
int bnx2x_load_cnic(struct bnx2x *bp);

/**
 * bnx2x_enable_msix - set msix configuration.
 *
 * @bp:		driver handle
 *
 * fills msix_table, requests vectors, updates num_queues
 * according to number of available vectors.
 */
int bnx2x_enable_msix(struct bnx2x *bp);

/**
 * bnx2x_enable_msi - request msi mode from OS, update internals accordingly
 *
 * @bp:		driver handle
 */
int bnx2x_enable_msi(struct bnx2x *bp);

/**
 * bnx2x_poll - NAPI callback
 *
 * @napi:	napi structure
 * @budget:	NAPI budget
 */
int bnx2x_poll(struct napi_struct *napi, int budget);

/**
 * bnx2x_alloc_mem_bp - allocate memories outside the main driver structure
 *
 * @bp:		driver handle
 */
int bnx2x_alloc_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_free_mem_bp - release memories outside the main driver structure
 *
 * @bp:		driver handle
 */
void bnx2x_free_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_change_mtu - change mtu netdev callback
 *
 * @dev:	net device
 * @new_mtu:	requested mtu
 */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu);

#ifdef NETDEV_FCOE_WWNN
/**
 * bnx2x_fcoe_get_wwn - return the requested WWN value for this port
 *
 * @dev:	net_device
 * @wwn:	output buffer
 * @type:	WWN type: NETDEV_FCOE_WWNN (node) or NETDEV_FCOE_WWPN (port)
 */
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
#endif

netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features);
int bnx2x_set_features(struct net_device *dev, netdev_features_t features);

/**
 * bnx2x_tx_timeout - tx timeout netdev callback
 *
 * @dev:	net device
 */
void bnx2x_tx_timeout(struct net_device *dev);

/*********************** Inlines **********************************/
/*********************** Fast path ********************************/
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	barrier(); /* status block is written to by the chip */
	fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}

static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
					u8 segment, u16 index, u8 op,
					u8 update, u32 igu_addr)
{
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr);
	REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
				   u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
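
/* Illustrative only: bnx2x_ack_sb() below dispatches to the HC or IGU flavour
 * of the status-block ack depending on bp->common.int_block.  A typical
 * (hypothetical) use is re-arming interrupts at the end of a NAPI poll by
 * acking the last seen status-block index; the storm/op identifiers are
 * assumed to be the usual bnx2x constants, which are not defined here.
 *
 *	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
 *		     le16_to_cpu(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
 */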

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
				u16 index, u8 op, u8 update)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
	else {
		u8 segment;

		if (CHIP_INT_MODE_IS_BC(bp))
			segment = storm;
		else if (igu_sb_id != bp->igu_dsb_id)
			segment = IGU_SEG_ACCESS_DEF;
		else if (storm == ATTENTION_ID)
			segment = IGU_SEG_ACCESS_ATTN;
		else
			segment = IGU_SEG_ACCESS_DEF;
		bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
	}
}

static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	barrier();
	return result;
}

static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
	u32 result = REG_RD(bp, igu_addr);

	DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
	   result, igu_addr);

	barrier();
	return result;
}

static inline u16 bnx2x_ack_int(struct bnx2x *bp)
{
	barrier();
	if (bp->common.int_block == INT_BLOCK_HC)
		return bnx2x_hc_ack_int(bp);
	else
		return bnx2x_igu_ack_int(bp);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return txdata->tx_pkt_prod != txdata->tx_pkt_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x *bp,
				 struct bnx2x_fp_txdata *txdata)
{
	s16 used;
	u16 prod;
	u16 cons;

	prod = txdata->tx_bd_prod;
	cons = txdata->tx_bd_cons;

	used = SUB_S16(prod, cons);

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > txdata->tx_ring_size);
	WARN_ON((txdata->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(txdata->tx_ring_size) - used;
}

static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	return hw_cons != txdata->tx_pkt_cons;
}

static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u8 cos;
	for_each_cos_in_tx_queue(fp, cos)
		if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
			return true;
	return false;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}
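
/* Illustrative only: bnx2x_tx_avail() above leans on 16-bit wraparound
 * arithmetic, so the producer index may wrap past zero while the consumer is
 * still near the top of the range.  A worked example with made-up values:
 *
 *	u16 prod = 0x0010, cons = 0xfff0;
 *	s16 used = (s16)(prod - cons);	// 0x0020, i.e. 32 BDs in flight
 *	u16 avail = txdata->tx_ring_size - used;
 */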

/**
 * bnx2x_tx_disable - disables tx from stack point of view
 *
 * @bp:		driver handle
 */
static inline void bnx2x_tx_disable(struct bnx2x *bp)
{
	netif_tx_disable(bp->dev);
	netif_carrier_off(bp->dev);
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
		       SGE_PAGES, DMA_FROM_DEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_rx_queue_cnic(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
}

static inline void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_eth_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
}

static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue_cnic(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
}

static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
}

int bnx2x_set_int_mode(struct bnx2x *bp);

static inline void bnx2x_disable_msi(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		pci_disable_msix(bp->pdev);
		bp->flags &= ~(USING_MSIX_FLAG | USING_SINGLE_MSIX_FLAG);
	} else if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
}

static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	return num_queues ?
		 min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
		 min_t(int, netif_get_num_default_rss_queues(),
		       BNX2X_MAX_QUEUES(bp));
}

static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
			idx--;
		}
	}
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));

	/* Clear the last two indices in each page:
	 * these are the indices that correspond to the "next" element,
	 * hence will never be indicated and should be removed from
	 * the calculations.
	 */
	bnx2x_clear_sge_mask_next_elems(fp);
}

/* note that we are not allocating a new buffer,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp,
				       u16 cons, u16 prod)
{
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));
	prod_rx_buf->data = cons_rx_buf->data;
	*prod_bd = *cons_bd;
}

/************************* Init ******************************************/

/* returns func by VN for current port */
static inline int func_by_vn(struct bnx2x *bp, int vn)
{
	return 2 * vn + BP_PORT(bp);
}

static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash)
{
	return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, config_hash);
}
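
/* Worked example of the mapping implemented by func_by_vn() above
 * (func = 2 * vn + port): on port 0 the virtual networks map to the even
 * function IDs, on port 1 to the odd ones.
 *
 *	port 0: vn 0,1,2,3 -> func 0,2,4,6
 *	port 1: vn 0,1,2,3 -> func 1,3,5,7
 */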

/**
 * bnx2x_func_start - init function
 *
 * @bp:		driver handle
 *
 * Must be called before sending CLIENT_SETUP for the first client.
 */
static inline int bnx2x_func_start(struct bnx2x *bp)
{
	struct bnx2x_func_state_params func_params = {NULL};
	struct bnx2x_func_start_params *start_params =
		&func_params.params.start;

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_START;

	/* Function parameters */
	start_params->mf_mode = bp->mf_mode;
	start_params->sd_vlan_tag = bp->mf_ov;

	if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
		start_params->network_cos_mode = STATIC_COS;
	else /* CHIP_IS_E1X */
		start_params->network_cos_mode = FW_WRR;

	start_params->gre_tunnel_mode = IPGRE_TUNNEL;
	start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS;

	return bnx2x_func_state_change(bp, &func_params);
}

/**
 * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format
 *
 * @fw_hi:	pointer to upper part
 * @fw_mid:	pointer to middle part
 * @fw_lo:	pointer to lower part
 * @mac:	pointer to MAC address
 */
static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
					 __le16 *fw_lo, u8 *mac)
{
	((u8 *)fw_hi)[0]  = mac[1];
	((u8 *)fw_hi)[1]  = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lo)[0]  = mac[5];
	((u8 *)fw_lo)[1]  = mac[4];
}
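
/* Illustrative only: a worked example of the byte swizzle performed by
 * bnx2x_set_fw_mac_addr() above, for the made-up address 00:11:22:33:44:55.
 * Each 16-bit FW word holds one byte-swapped pair of the wire-order address:
 *
 *	u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *	__le16 hi, mid, lo;
 *
 *	bnx2x_set_fw_mac_addr(&hi, &mid, &lo, mac);
 *	// in-memory bytes: hi = {0x11, 0x00}, mid = {0x33, 0x22},
 *	//                  lo = {0x55, 0x44}
 */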

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	if (fp->disable_tpa)
		return;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_RINGS; i++) {
		struct eth_rx_bd *rx_bd;

		rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
		rx_bd->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		rx_bd->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
	}
}

/* Statistics IDs are global per chip/path, while Client IDs for E1x are per
 * port.
 */
static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	if (!CHIP_IS_E1x(bp)) {
		/* there are special statistics counters for FCoE 136..140 */
		if (IS_FCOE_FP(fp))
			return bp->cnic_base_cl_id + (bp->pf_num >> 1);
		return fp->cl_id;
	}
	return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x;
}

static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
					       bnx2x_obj_type obj_type)
{
	struct bnx2x *bp = fp->bp;

	/* Configure classification DBs */
	bnx2x_init_mac_obj(bp, &bnx2x_sp_obj(bp, fp).mac_obj, fp->cl_id,
			   fp->cid, BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
			   bnx2x_sp_mapping(bp, mac_rdata),
			   BNX2X_FILTER_MAC_PENDING,
			   &bp->sp_state, obj_type,
			   &bp->macs_pool);
}

/**
 * bnx2x_get_path_func_num - get number of active functions
 *
 * @bp:		driver handle
 *
 * Calculates the number of active (not hidden) functions on the
 * current path.
 */
static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
{
	u8 func_num = 0, i;

	/* 57710 has only one function per-port */
	if (CHIP_IS_E1(bp))
		return 1;

	/* Calculate a number of functions enabled on the current
	 * PATH/PORT.
	 */
	if (CHIP_REV_IS_SLOW(bp)) {
		if (IS_MF(bp))
			func_num = 4;
		else
			func_num = 2;
	} else {
		for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
			u32 func_config =
				MF_CFG_RD(bp,
					  func_mf_config[BP_PORT(bp) + 2 * i].
					  config);
			func_num +=
				((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
		}
	}

	WARN_ON(!func_num);

	return func_num;
}

static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
{
	/* RX_MODE controlling object */
	bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);

	/* multicast configuration controlling object */
	bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
			     BP_FUNC(bp), BP_FUNC(bp),
			     bnx2x_sp(bp, mcast_rdata),
			     bnx2x_sp_mapping(bp, mcast_rdata),
			     BNX2X_FILTER_MCAST_PENDING, &bp->sp_state,
			     BNX2X_OBJ_TYPE_RX);

	/* Setup CAM credit pools */
	bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
				   bnx2x_get_path_func_num(bp));

	bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_ABS_FUNC(bp)>>1,
				    bnx2x_get_path_func_num(bp));

	/* RSS configuration object */
	bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
				  bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
				  bnx2x_sp(bp, rss_rdata),
				  bnx2x_sp_mapping(bp, rss_rdata),
				  BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
				  BNX2X_OBJ_TYPE_RX);
}

static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
{
	if (CHIP_IS_E1x(fp->bp))
		return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H;
	else
		return fp->cl_id;
}

u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);

static inline void bnx2x_init_txdata(struct bnx2x *bp,
				     struct bnx2x_fp_txdata *txdata, u32 cid,
				     int txq_index, __le16 *tx_cons_sb,
				     struct bnx2x_fastpath *fp)
{
	txdata->cid = cid;
	txdata->txq_index = txq_index;
	txdata->tx_cons_sb = tx_cons_sb;
	txdata->parent_fp = fp;
	txdata->tx_ring_size = IS_FCOE_FP(fp) ? MAX_TX_AVAIL : bp->tx_ring_size;

	DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n",
	   txdata->cid, txdata->txq_index);
}

static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
{
	return bp->cnic_base_cl_id + cl_idx +
		(bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX;
}

static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
{
	/* the 'first' id is allocated for the cnic */
	return bp->base_fw_ndsb;
}

static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
{
	return bp->igu_base_sb;
}

static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
	unsigned long q_type = 0;

	bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
	bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
						     BNX2X_FCOE_ETH_CL_ID_IDX);
	bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
	bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
	bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
	bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
	bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
			  fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
			  fp);

	DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);

	/* qZone id equals to FW (per path) client id */
	bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
	/* init shortcut */
	bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
		bnx2x_rx_ustorm_prods_offset(fp);

	/* Configure Queue State object */
	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

	/* No multi-CoS for FCoE L2 client */
	BUG_ON(fp->max_cos != 1);

	bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
			     &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
			     bnx2x_sp_mapping(bp, q_rdata), q_type);

	DP(NETIF_MSG_IFUP,
	   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
	   fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
	   fp->igu_sb_id);
}

static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
				       struct bnx2x_fp_txdata *txdata)
{
	int cnt = 1000;

	while (bnx2x_has_tx_work_unload(txdata)) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for queue[%d]: txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n",
				  txdata->txq_index, txdata->tx_pkt_prod,
				  txdata->tx_pkt_cons);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
			return -EBUSY;
#else
			break;
#endif
		}
		cnt--;
		usleep_range(1000, 2000);
	}

	return 0;
}

int bnx2x_get_link_cfg_idx(struct bnx2x *bp);

static inline void __storm_memset_struct(struct bnx2x *bp,
					 u32 addr, size_t size, u32 *data)
{
	int i;
	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), data[i]);
}
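
/* Illustrative only: __storm_memset_struct() above copies a host structure
 * into chip memory one 32-bit word at a time.  A hypothetical use, clearing
 * a producers structure at some storm RAM offset "addr" (not a real offset
 * defined in this header):
 *
 *	struct ustorm_eth_rx_producers prods = {0};
 *
 *	__storm_memset_struct(bp, addr, sizeof(prods), (u32 *)&prods);
 */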

/**
 * bnx2x_wait_sp_comp - wait for the outstanding SP commands.
 *
 * @bp:		driver handle
 * @mask:	bits that need to be cleared
 */
static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
{
	int tout = 5000; /* Wait for 5 secs tops */

	while (tout--) {
		smp_mb();
		netif_addr_lock_bh(bp->dev);
		if (!(bp->sp_state & mask)) {
			netif_addr_unlock_bh(bp->dev);
			return true;
		}
		netif_addr_unlock_bh(bp->dev);

		usleep_range(1000, 2000);
	}

	smp_mb();

	netif_addr_lock_bh(bp->dev);
	if (bp->sp_state & mask) {
		BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, mask 0x%lx\n",
			  bp->sp_state, mask);
		netif_addr_unlock_bh(bp->dev);
		return false;
	}
	netif_addr_unlock_bh(bp->dev);

	return true;
}

/**
 * bnx2x_set_ctx_validation - set CDU context validation values
 *
 * @bp:		driver handle
 * @cxt:	context of the connection on the host memory
 * @cid:	SW CID of the connection to be configured
 */
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid);

void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec);
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);

/**
 * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration.
 *
 * @bp:		driver handle
 * @mf_cfg:	MF configuration
 */
static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
{
	u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
		      FUNC_MF_CFG_MAX_BW_SHIFT;
	if (!max_cfg) {
		DP(NETIF_MSG_IFUP | BNX2X_MSG_ETHTOOL,
		   "Max BW configured to 0 - using 100 instead\n");
		max_cfg = 100;
	}
	return max_cfg;
}

/* checks if HW supports GRO for given MTU */
static inline bool bnx2x_mtu_allows_gro(int mtu)
{
	/* gro frags per page */
	int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE);

	/*
	 * 1. Number of frags should not grow above MAX_SKB_FRAGS
	 * 2. Frag must fit the page
	 */
	return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
}

/**
 * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
 *
 * @bp:		driver handle
 */
void bnx2x_get_iscsi_info(struct bnx2x *bp);
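
/* Illustrative only: a hypothetical caller gating the GRO feature flag on the
 * bnx2x_mtu_allows_gro() check above; the driver's real feature-fixup policy
 * lives elsewhere and may differ.
 *
 *	if (!bnx2x_mtu_allows_gro(new_mtu))
 *		dev->features &= ~NETIF_F_GRO;
 */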

/**
 * bnx2x_link_sync_notify - send notification to other functions.
 *
 * @bp:		driver handle
 */
static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
{
	int func;
	int vn;

	/* Set the attention towards other drivers on the same port */
	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
		if (vn == BP_VN(bp))
			continue;

		func = func_by_vn(bp, vn);
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
		       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
	}
}

/**
 * bnx2x_update_drv_flags - update flags in shmem
 *
 * @bp:		driver handle
 * @flags:	flags to update
 * @set:	set or clear
 */
static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
{
	if (SHMEM2_HAS(bp, drv_flags)) {
		u32 drv_flags;
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
		drv_flags = SHMEM2_RD(bp, drv_flags);

		if (set)
			SET_FLAGS(drv_flags, flags);
		else
			RESET_FLAGS(drv_flags, flags);

		SHMEM2_WR(bp, drv_flags, drv_flags);
		DP(NETIF_MSG_IFUP, "drv_flags 0x%08x\n", drv_flags);
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
	}
}

static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
{
	if (is_valid_ether_addr(addr) ||
	    (is_zero_ether_addr(addr) &&
	     (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))))
		return true;

	return false;
}

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string
 *
 * @bp:		driver handle
 * @buf:	character buffer to fill with the fw name
 * @buf_len:	length of the above buffer
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);

int bnx2x_drain_tx_queues(struct bnx2x *bp);
void bnx2x_squeeze_objects(struct bnx2x *bp);

#endif /* BNX2X_CMN_H */