bnx2x_cmn.h revision 07ba6af465aa551326e18133c085035d2059cabc
/* bnx2x_cmn.h: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#ifndef BNX2X_CMN_H
#define BNX2X_CMN_H

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>


#include "bnx2x.h"
#include "bnx2x_sriov.h"

/* This is used as a replacement for an MCP if it's not present */
extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */

extern int num_queues;
extern int int_mode;

/************************ Macros ********************************/
#define BNX2X_PCI_FREE(x, y, size) \
        do { \
                if (x) { \
                        dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
                        x = NULL; \
                        y = 0; \
                } \
        } while (0)

#define BNX2X_FREE(x) \
        do { \
                if (x) { \
                        kfree((void *)x); \
                        x = NULL; \
                } \
        } while (0)

#define BNX2X_PCI_ALLOC(x, y, size) \
        do { \
                x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
                if (x == NULL) \
                        goto alloc_mem_err; \
                memset((void *)x, 0, size); \
        } while (0)

#define BNX2X_ALLOC(x, size) \
        do { \
                x = kzalloc(size, GFP_KERNEL); \
                if (x == NULL) \
                        goto alloc_mem_err; \
        } while (0)
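/* Usage sketch (illustrative only; the member and size names below are
 * hypothetical): the allocation macros above assume a local
 * `struct bnx2x *bp` and an `alloc_mem_err:` label in the calling
 * function, so a caller is expected to look roughly like this:
 *
 *        static int example_alloc(struct bnx2x *bp)
 *        {
 *                BNX2X_ALLOC(bp->example_tbl, sizeof(*bp->example_tbl));
 *                BNX2X_PCI_ALLOC(bp->example_ring, &bp->example_mapping,
 *                                EXAMPLE_RING_SIZE);
 *                return 0;
 *
 *        alloc_mem_err:
 *                BNX2X_FREE(bp->example_tbl);
 *                BNX2X_PCI_FREE(bp->example_ring, bp->example_mapping,
 *                               EXAMPLE_RING_SIZE);
 *                return -ENOMEM;
 *        }
 */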
/*********************** Interfaces ****************************
 *  Functions that need to be implemented by each driver version
 */
/* Init */

/**
 * bnx2x_send_unload_req - request unload mode from the MCP.
 *
 * @bp: driver handle
 * @unload_mode: requested function's unload mode
 *
 * Returns the unload mode reported by the MCP: COMMON, PORT or FUNC.
 */
u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);

/**
 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
 *
 * @bp: driver handle
 * @keep_link: true iff link should be kept up
 */
void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link);

/**
 * bnx2x_config_rss_pf - configure RSS parameters in a PF.
 *
 * @bp: driver handle
 * @rss_obj: RSS object to use
 * @config_hash: re-configure the RSS hash keys
 */
int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
                        bool config_hash);

/**
 * bnx2x__init_func_obj - init function object
 *
 * @bp: driver handle
 *
 * Initializes the Function Object with the appropriate
 * parameters which include a function slow path driver
 * interface.
 */
void bnx2x__init_func_obj(struct bnx2x *bp);

/**
 * bnx2x_setup_queue - setup eth queue.
 *
 * @bp: driver handle
 * @fp: pointer to the fastpath structure
 * @leading: true iff this is the leading queue
 */
int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                      bool leading);

/**
 * bnx2x_setup_leading - bring up a leading eth queue.
 *
 * @bp: driver handle
 */
int bnx2x_setup_leading(struct bnx2x *bp);

/**
 * bnx2x_fw_command - send the MCP a request
 *
 * @bp: driver handle
 * @command: request
 * @param: request's parameter
 *
 * Blocks until there is a reply.
 */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
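/* Mailbox sketch (illustrative): request codes are the DRV_MSG_CODE_*
 * values from bnx2x_hsi.h and the reply is an FW_MSG_CODE_* value; a
 * zero reply typically means the MCP did not respond:
 *
 *        u32 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
 *        if (!load_code)
 *                return -EBUSY;
 */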
/**
 * bnx2x_initial_phy_init - initialize link parameters structure variables.
 *
 * @bp: driver handle
 * @load_mode: current mode
 */
int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);

/**
 * bnx2x_link_set - configure hw according to link parameters structure.
 *
 * @bp: driver handle
 */
void bnx2x_link_set(struct bnx2x *bp);

/**
 * bnx2x_force_link_reset - forces a link reset, and puts the PHY
 * in reset as well.
 *
 * @bp: driver handle
 */
void bnx2x_force_link_reset(struct bnx2x *bp);

/**
 * bnx2x_link_test - query link status.
 *
 * @bp: driver handle
 * @is_serdes: true iff the SerDes link is to be tested
 *
 * Returns 0 if link is UP.
 */
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);

/**
 * bnx2x_drv_pulse - write driver pulse to shmem
 *
 * @bp: driver handle
 *
 * Writes the value in bp->fw_drv_pulse_wr_seq to the drv_pulse mbox
 * in the shmem.
 */
void bnx2x_drv_pulse(struct bnx2x *bp);

/**
 * bnx2x_igu_ack_sb - update IGU with current SB value
 *
 * @bp: driver handle
 * @igu_sb_id: SB id
 * @segment: SB segment
 * @index: SB index
 * @op: SB operation
 * @update: is HW update required
 */
void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
                      u16 index, u8 op, u8 update);

/* Disable transactions from chip to host */
void bnx2x_pf_disable(struct bnx2x *bp);
int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val);

/**
 * bnx2x__link_status_update - handles link status change.
 *
 * @bp: driver handle
 */
void bnx2x__link_status_update(struct bnx2x *bp);

/**
 * bnx2x_link_report - report link status to upper layer.
 *
 * @bp: driver handle
 */
void bnx2x_link_report(struct bnx2x *bp);

/* Non-atomic version of bnx2x_link_report() */
void __bnx2x_link_report(struct bnx2x *bp);

/**
 * bnx2x_get_mf_speed - calculate MF speed.
 *
 * @bp: driver handle
 *
 * Takes into account current linespeed and MF configuration.
 */
u16 bnx2x_get_mf_speed(struct bnx2x *bp);

/**
 * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler
 *
 * @irq: irq number
 * @dev_instance: private instance
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);

/**
 * bnx2x_interrupt - non MSI-X interrupt handler
 *
 * @irq: irq number
 * @dev_instance: private instance
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);

/**
 * bnx2x_cnic_notify - send command to cnic driver
 *
 * @bp: driver handle
 * @cmd: command
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);

/**
 * bnx2x_setup_cnic_irq_info - provides cnic with IRQ information
 *
 * @bp: driver handle
 */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);

/**
 * bnx2x_setup_cnic_info - provides cnic with updated info
 *
 * @bp: driver handle
 */
void bnx2x_setup_cnic_info(struct bnx2x *bp);

/**
 * bnx2x_int_enable - enable HW interrupts.
 *
 * @bp: driver handle
 */
void bnx2x_int_enable(struct bnx2x *bp);

/**
 * bnx2x_int_disable_sync - disable interrupts.
 *
 * @bp: driver handle
 * @disable_hw: true to also disable HW interrupts.
 *
 * This function ensures that no ISRs or SP DPCs (sp_task) are running
 * after it returns.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);

/**
 * bnx2x_nic_init_cnic - init driver internals for cnic.
 *
 * @bp: driver handle
 *
 * Initializes:
 *  - rings
 *  - status blocks
 *  - etc.
 */
void bnx2x_nic_init_cnic(struct bnx2x *bp);

/**
 * bnx2x_nic_init - init driver internals.
 *
 * @bp: driver handle
 * @load_code: COMMON, PORT or FUNCTION
 *
 * Initializes:
 *  - rings
 *  - status blocks
 *  - etc.
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);

/**
 * bnx2x_alloc_mem_cnic - allocate driver's memory for cnic.
 *
 * @bp: driver handle
 */
int bnx2x_alloc_mem_cnic(struct bnx2x *bp);

/**
 * bnx2x_alloc_mem - allocate driver's memory.
 *
 * @bp: driver handle
 */
int bnx2x_alloc_mem(struct bnx2x *bp);

/**
 * bnx2x_free_mem_cnic - release driver's memory for cnic.
 *
 * @bp: driver handle
 */
void bnx2x_free_mem_cnic(struct bnx2x *bp);

/**
 * bnx2x_free_mem - release driver's memory.
 *
 * @bp: driver handle
 */
void bnx2x_free_mem(struct bnx2x *bp);

/**
 * bnx2x_set_num_queues - set number of queues according to mode.
 *
 * @bp: driver handle
 */
void bnx2x_set_num_queues(struct bnx2x *bp);

/**
 * bnx2x_chip_cleanup - cleanup chip internals.
 *
 * @bp: driver handle
 * @unload_mode: COMMON, PORT, FUNCTION
 * @keep_link: true iff link should be kept up.
 *
 * - Cleanup MAC configuration.
 * - Closes clients.
 * - etc.
 */
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link);

/**
 * bnx2x_acquire_hw_lock - acquire HW lock.
 *
 * @bp: driver handle
 * @resource: resource bit which was locked
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * bnx2x_release_hw_lock - release HW lock.
 *
 * @bp: driver handle
 * @resource: resource bit which was locked
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
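/* Bracketing sketch: a HW lock serializes access to a resource shared
 * between PFs/driver instances; bnx2x_update_drv_flags() later in this
 * file uses exactly this pattern:
 *
 *        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
 *        ... read-modify-write the shared resource ...
 *        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
 */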
/**
 * bnx2x_release_leader_lock - release recovery leader lock
 *
 * @bp: driver handle
 */
int bnx2x_release_leader_lock(struct bnx2x *bp);

/**
 * bnx2x_set_eth_mac - configure eth MAC address in the HW
 *
 * @bp: driver handle
 * @set: set or clear
 *
 * Configures according to the value in netdev->dev_addr.
 */
int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);

/**
 * bnx2x_set_rx_mode - set MAC filtering configurations.
 *
 * @dev: netdevice
 *
 * Called with netif_tx_lock from dev_mcast.c.
 * If bp->state is OPEN, should be called with
 * netif_addr_lock_bh().
 */
void bnx2x_set_rx_mode(struct net_device *dev);

/**
 * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
 *
 * @bp: driver handle
 *
 * If bp->state is OPEN, should be called with
 * netif_addr_lock_bh().
 */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp);

/**
 * bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
 *
 * @bp: driver handle
 * @cl_id: client id
 * @rx_mode_flags: rx mode configuration
 * @rx_accept_flags: rx accept configuration
 * @tx_accept_flags: tx accept configuration (tx switch)
 * @ramrod_flags: ramrod configuration
 */
void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
                         unsigned long rx_mode_flags,
                         unsigned long rx_accept_flags,
                         unsigned long tx_accept_flags,
                         unsigned long ramrod_flags);

/* Parity errors related */
void bnx2x_set_pf_load(struct bnx2x *bp);
bool bnx2x_clear_pf_load(struct bnx2x *bp);
bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print);
bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
void bnx2x_set_reset_in_progress(struct bnx2x *bp);
void bnx2x_set_reset_global(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);
int bnx2x_init_hw_func_cnic(struct bnx2x *bp);

/**
 * bnx2x_sp_event - handle ramrods completion.
 *
 * @fp: fastpath handle for the event
 * @rr_cqe: eth_rx_cqe
 */
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);

/**
 * bnx2x_ilt_set_info - prepare ILT configurations.
 *
 * @bp: driver handle
 */
void bnx2x_ilt_set_info(struct bnx2x *bp);

/**
 * bnx2x_ilt_set_info_cnic - prepare ILT configurations for SRC
 * and TM.
 *
 * @bp: driver handle
 */
void bnx2x_ilt_set_info_cnic(struct bnx2x *bp);

/**
 * bnx2x_dcbx_init - initialize dcbx protocol.
 *
 * @bp: driver handle
 * @update_shmem: whether to also update the DCBX configuration in shmem
 */
void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem);

/**
 * bnx2x_set_power_state - set power state to the requested value.
 *
 * @bp: driver handle
 * @state: required state D0 or D3hot
 *
 * Currently only D0 and D3hot are supported.
 */
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);

/**
 * bnx2x_update_max_mf_config - update MAX part of MF configuration in HW.
 *
 * @bp: driver handle
 * @value: new value
 */
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
/* Error handling */
void bnx2x_panic_dump(struct bnx2x *bp);

void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);

/* validate that the correct fw is loaded */
bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err);

/* dev_close main block */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link);

/* dev_open main block */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode);

/* hard_xmit callback */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);

/* setup_tc callback */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);

int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);

/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct ustorm_eth_rx_producers rx_prods = {0};
        u32 i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        /* Make sure that the BD and SGE data is updated before updating the
         * producers since FW might read the BD/SGE right after the producer
         * is updated.
         * This is only applicable for weak-ordered memory model archs such
         * as IA-64. The following barrier is also mandatory since the FW
         * assumes BDs must have buffers.
         */
        wmb();

        for (i = 0; i < sizeof(rx_prods)/4; i++)
                REG_WR(bp, fp->ustorm_rx_prods_offset + i*4,
                       ((u32 *)&rx_prods)[i]);

        mmiowb(); /* keep prod updates ordered */

        DP(NETIF_MSG_RX_STATUS,
           "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
           fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
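/* Call-site sketch (local variable names are illustrative): once the RX
 * rings have been refilled, the polling path publishes all three
 * producers in a single call so the FW sees a consistent snapshot:
 *
 *        bnx2x_update_rx_prod(bp, fp, bd_prod, sw_comp_prod, sge_prod);
 */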
/* reload helper */
int bnx2x_reload_if_running(struct net_device *dev);

int bnx2x_change_mac_addr(struct net_device *dev, void *p);

/* NAPI poll Rx part */
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);

/* NAPI poll Tx part */
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);

/* suspend/resume callbacks */
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
int bnx2x_resume(struct pci_dev *pdev);

/* Release IRQ vectors */
void bnx2x_free_irq(struct bnx2x *bp);

void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
void bnx2x_free_fp_mem(struct bnx2x *bp);
int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
int bnx2x_alloc_fp_mem(struct bnx2x *bp);
void bnx2x_init_rx_rings(struct bnx2x *bp);
void bnx2x_init_rx_rings_cnic(struct bnx2x *bp);
void bnx2x_free_skbs_cnic(struct bnx2x *bp);
void bnx2x_free_skbs(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_netif_start(struct bnx2x *bp);
int bnx2x_load_cnic(struct bnx2x *bp);

/**
 * bnx2x_enable_msix - set msix configuration.
 *
 * @bp: driver handle
 *
 * Fills msix_table, requests vectors and updates num_queues
 * according to the number of available vectors.
 */
int bnx2x_enable_msix(struct bnx2x *bp);

/**
 * bnx2x_enable_msi - request MSI mode from the OS, update internals accordingly
 *
 * @bp: driver handle
 */
int bnx2x_enable_msi(struct bnx2x *bp);

/**
 * bnx2x_poll - NAPI callback
 *
 * @napi: napi structure
 * @budget: NAPI budget (maximum number of packets to process)
 */
int bnx2x_poll(struct napi_struct *napi, int budget);

/**
 * bnx2x_alloc_mem_bp - allocate memory outside the main driver structure
 *
 * @bp: driver handle
 */
int bnx2x_alloc_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_free_mem_bp - release memory outside the main driver structure
 *
 * @bp: driver handle
 */
void bnx2x_free_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_change_mtu - change mtu netdev callback
 *
 * @dev: net device
 * @new_mtu: requested mtu
 */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu);

#ifdef NETDEV_FCOE_WWNN
/**
 * bnx2x_fcoe_get_wwn - return the requested WWN value for this port
 *
 * @dev: net_device
 * @wwn: output buffer
 * @type: WWN type: NETDEV_FCOE_WWNN (node) or NETDEV_FCOE_WWPN (port)
 */
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
#endif

netdev_features_t bnx2x_fix_features(struct net_device *dev,
                                     netdev_features_t features);
int bnx2x_set_features(struct net_device *dev, netdev_features_t features);

/**
 * bnx2x_tx_timeout - tx timeout netdev callback
 *
 * @dev: net device
 */
void bnx2x_tx_timeout(struct net_device *dev);

/*********************** Inlines **********************************/
/*********************** Fast path ********************************/
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        barrier(); /* status block is written to by the chip */
        fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}

static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
                                        u8 segment, u16 index, u8 op,
                                        u8 update, u32 igu_addr)
{
        struct igu_regular cmd_data = {0};

        cmd_data.sb_id_and_flags =
                        ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
                         (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
                         (update << IGU_REGULAR_BUPDATE_SHIFT) |
                         (op << IGU_REGULAR_ENABLE_INT_SHIFT));

        DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
           cmd_data.sb_id_and_flags, igu_addr);
        REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}
static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
                                   u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
                                u16 index, u8 op, u8 update)
{
        if (bp->common.int_block == INT_BLOCK_HC)
                bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
        else {
                u8 segment;

                if (CHIP_INT_MODE_IS_BC(bp))
                        segment = storm;
                else if (igu_sb_id != bp->igu_dsb_id)
                        segment = IGU_SEG_ACCESS_DEF;
                else if (storm == ATTENTION_ID)
                        segment = IGU_SEG_ACCESS_ATTN;
                else
                        segment = IGU_SEG_ACCESS_DEF;
                bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
        }
}
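/* Acknowledgment sketch: after servicing a status block, an ISR re-arms
 * it with a call of roughly this shape (the SB id, storm id, index and
 * IGU_INT_* flag depend on the interrupt source):
 *
 *        bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
 *                     le16_to_cpu(bp->def_att_idx), IGU_INT_NOP, 1);
 */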
static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        barrier();
        return result;
}

static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
{
        u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
        u32 result = REG_RD(bp, igu_addr);

        DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
           result, igu_addr);

        barrier();
        return result;
}

static inline u16 bnx2x_ack_int(struct bnx2x *bp)
{
        barrier();
        if (bp->common.int_block == INT_BLOCK_HC)
                return bnx2x_hc_ack_int(bp);
        else
                return bnx2x_igu_ack_int(bp);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return txdata->tx_pkt_prod != txdata->tx_pkt_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x *bp,
                                 struct bnx2x_fp_txdata *txdata)
{
        s16 used;
        u16 prod;
        u16 cons;

        prod = txdata->tx_bd_prod;
        cons = txdata->tx_bd_cons;

        used = SUB_S16(prod, cons);

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > txdata->tx_ring_size);
        WARN_ON((txdata->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(txdata->tx_ring_size) - used;
}
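/* Worked example (hypothetical values): SUB_S16() subtracts modulo 2^16,
 * so the accounting survives producer/consumer wraparound; with
 * tx_bd_prod == 0x0005 and tx_bd_cons == 0xfffb, used == 10 and
 * tx_ring_size - 10 BDs are reported free.
 */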
static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata)
{
        u16 hw_cons;

        /* Tell compiler that status block fields can change */
        barrier();
        hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
        return hw_cons != txdata->tx_pkt_cons;
}

static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u8 cos;
        for_each_cos_in_tx_queue(fp, cos)
                if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
                        return true;
        return false;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
        u16 rx_cons_sb;

        /* Tell compiler that status block fields can change */
        barrier();
        rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
        if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                rx_cons_sb++;
        return (fp->rx_comp_cons != rx_cons_sb);
}

/**
 * bnx2x_tx_disable - disables tx from stack point of view
 *
 * @bp: driver handle
 */
static inline void bnx2x_tx_disable(struct bnx2x *bp)
{
        netif_tx_disable(bp->dev);
        netif_carrier_off(bp->dev);
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
{
        int i;

        /* Add NAPI objects */
        for_each_rx_queue_cnic(bp, i)
                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
                               bnx2x_poll, BNX2X_NAPI_WEIGHT);
}

static inline void bnx2x_add_all_napi(struct bnx2x *bp)
{
        int i;

        /* Add NAPI objects */
        for_each_eth_queue(bp, i)
                netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
                               bnx2x_poll, BNX2X_NAPI_WEIGHT);
}

static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
{
        int i;

        for_each_rx_queue_cnic(bp, i)
                netif_napi_del(&bnx2x_fp(bp, i, napi));
}

static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
        int i;

        for_each_eth_queue(bp, i)
                netif_napi_del(&bnx2x_fp(bp, i, napi));
}

int bnx2x_set_int_mode(struct bnx2x *bp);

static inline void bnx2x_disable_msi(struct bnx2x *bp)
{
        if (bp->flags & USING_MSIX_FLAG) {
                pci_disable_msix(bp->pdev);
                bp->flags &= ~(USING_MSIX_FLAG | USING_SINGLE_MSIX_FLAG);
        } else if (bp->flags & USING_MSI_FLAG) {
                pci_disable_msi(bp->pdev);
                bp->flags &= ~USING_MSI_FLAG;
        }
}

static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
{
        return num_queues ?
                 min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
                 min_t(int, netif_get_num_default_rss_queues(),
                       BNX2X_MAX_QUEUES(bp));
}

static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
                        idx--;
                }
        }
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));

        /* Clear the last two indices in each page: these are the indices
         * that correspond to the "next" element, hence they will never be
         * indicated and should be removed from the calculations.
         */
        bnx2x_clear_sge_mask_next_elems(fp);
}
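/* Worked example (assuming 4 KiB BCM pages and 8-byte SGE entries, i.e.
 * RX_SGE_CNT == 512): bnx2x_clear_sge_mask_next_elems() clears bits
 * 510-511 for page 1, 1022-1023 for page 2, and so on; these are the
 * "next page" slots at the end of every SGE page.
 */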
/* note that we are not allocating a new buffer,
 * we are just moving one from cons to prod.
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp,
                                       u16 cons, u16 prod)
{
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        dma_unmap_addr_set(prod_rx_buf, mapping,
                           dma_unmap_addr(cons_rx_buf, mapping));
        prod_rx_buf->data = cons_rx_buf->data;
        *prod_bd = *cons_bd;
}

/************************* Init ******************************************/

/* returns func by VN for current port */
static inline int func_by_vn(struct bnx2x *bp, int vn)
{
        return 2 * vn + BP_PORT(bp);
}

static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash)
{
        return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, config_hash);
}

/**
 * bnx2x_func_start - init function
 *
 * @bp: driver handle
 *
 * Must be called before sending CLIENT_SETUP for the first client.
 */
static inline int bnx2x_func_start(struct bnx2x *bp)
{
        struct bnx2x_func_state_params func_params = {NULL};
        struct bnx2x_func_start_params *start_params =
                &func_params.params.start;

        /* Prepare parameters for function state transitions */
        __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

        func_params.f_obj = &bp->func_obj;
        func_params.cmd = BNX2X_F_CMD_START;

        /* Function parameters */
        start_params->mf_mode = bp->mf_mode;
        start_params->sd_vlan_tag = bp->mf_ov;

        if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
                start_params->network_cos_mode = STATIC_COS;
        else /* CHIP_IS_E1X */
                start_params->network_cos_mode = FW_WRR;

        return bnx2x_func_state_change(bp, &func_params);
}


/**
 * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format
 *
 * @fw_hi: pointer to upper part
 * @fw_mid: pointer to middle part
 * @fw_lo: pointer to lower part
 * @mac: pointer to MAC address
 */
static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo,
                                         u8 *mac)
{
        ((u8 *)fw_hi)[0]  = mac[1];
        ((u8 *)fw_hi)[1]  = mac[0];
        ((u8 *)fw_mid)[0] = mac[3];
        ((u8 *)fw_mid)[1] = mac[2];
        ((u8 *)fw_lo)[0]  = mac[5];
        ((u8 *)fw_lo)[1]  = mac[4];
}
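/* Worked example: for the MAC address 00:11:22:33:44:55 the stores above
 * yield fw_hi bytes {0x11, 0x00}, fw_mid bytes {0x33, 0x22} and fw_lo
 * bytes {0x55, 0x44}; read back as u16 on a little-endian host that is
 * 0x0011, 0x2233 and 0x4455, i.e. the address split into big-endian
 * 16-bit words.
 */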
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        if (fp->disable_tpa)
                return;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
{
        int i;

        for (i = 1; i <= NUM_RX_RINGS; i++) {
                struct eth_rx_bd *rx_bd;

                rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
                rx_bd->addr_hi =
                        cpu_to_le32(U64_HI(fp->rx_desc_mapping +
                                    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
                rx_bd->addr_lo =
                        cpu_to_le32(U64_LO(fp->rx_desc_mapping +
                                    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
        }
}

/* Statistics IDs are global per chip/path, while Client IDs for E1x are per
 * port.
 */
static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;
        if (!CHIP_IS_E1x(bp)) {
                /* there are special statistics counters for FCoE 136..140 */
                if (IS_FCOE_FP(fp))
                        return bp->cnic_base_cl_id + (bp->pf_num >> 1);
                return fp->cl_id;
        }
        return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x;
}

static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
                                               bnx2x_obj_type obj_type)
{
        struct bnx2x *bp = fp->bp;

        /* Configure classification DBs */
        bnx2x_init_mac_obj(bp, &bnx2x_sp_obj(bp, fp).mac_obj, fp->cl_id,
                           fp->cid, BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
                           bnx2x_sp_mapping(bp, mac_rdata),
                           BNX2X_FILTER_MAC_PENDING,
                           &bp->sp_state, obj_type,
                           &bp->macs_pool);
}

/**
 * bnx2x_get_path_func_num - get number of active functions
 *
 * @bp: driver handle
 *
 * Calculates the number of active (not hidden) functions on the
 * current path.
 */
static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
{
        u8 func_num = 0, i;

        /* 57710 has only one function per-port */
        if (CHIP_IS_E1(bp))
                return 1;

        /* Calculate a number of functions enabled on the current
         * PATH/PORT.
         */
        if (CHIP_REV_IS_SLOW(bp)) {
                if (IS_MF(bp))
                        func_num = 4;
                else
                        func_num = 2;
        } else {
                for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
                        u32 func_config =
                                MF_CFG_RD(bp,
                                          func_mf_config[BP_PORT(bp) + 2 * i].
                                          config);
                        func_num +=
                                ((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
                }
        }

        WARN_ON(!func_num);

        return func_num;
}

static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
{
        /* RX_MODE controlling object */
        bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);

        /* multicast configuration controlling object */
        bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
                             BP_FUNC(bp), BP_FUNC(bp),
                             bnx2x_sp(bp, mcast_rdata),
                             bnx2x_sp_mapping(bp, mcast_rdata),
                             BNX2X_FILTER_MCAST_PENDING, &bp->sp_state,
                             BNX2X_OBJ_TYPE_RX);

        /* Setup CAM credit pools */
        bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
                                   bnx2x_get_path_func_num(bp));

        bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_ABS_FUNC(bp)>>1,
                                    bnx2x_get_path_func_num(bp));

        /* RSS configuration object */
        bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
                                  bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
                                  bnx2x_sp(bp, rss_rdata),
                                  bnx2x_sp_mapping(bp, rss_rdata),
                                  BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
                                  BNX2X_OBJ_TYPE_RX);
}

static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
{
        if (CHIP_IS_E1x(fp->bp))
                return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H;
        else
                return fp->cl_id;
}

u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);

static inline void bnx2x_init_txdata(struct bnx2x *bp,
                                     struct bnx2x_fp_txdata *txdata, u32 cid,
                                     int txq_index, __le16 *tx_cons_sb,
                                     struct bnx2x_fastpath *fp)
{
        txdata->cid = cid;
        txdata->txq_index = txq_index;
        txdata->tx_cons_sb = tx_cons_sb;
        txdata->parent_fp = fp;
        txdata->tx_ring_size = IS_FCOE_FP(fp) ? MAX_TX_AVAIL : bp->tx_ring_size;

        DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n",
           txdata->cid, txdata->txq_index);
}

static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
{
        return bp->cnic_base_cl_id + cl_idx +
                (bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX;
}

static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
{
        /* the 'first' id is allocated for the cnic */
        return bp->base_fw_ndsb;
}

static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
{
        return bp->igu_base_sb;
}

static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
{
        struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
        unsigned long q_type = 0;

        bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
        bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
                                                     BNX2X_FCOE_ETH_CL_ID_IDX);
        bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
        bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
        bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
        bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
        bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
                          fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
                          fp);

        DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);

        /* qZone id equals to FW (per path) client id */
        bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
        /* init shortcut */
        bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
                bnx2x_rx_ustorm_prods_offset(fp);

        /* Configure Queue State object */
        __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
        __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

        /* No multi-CoS for FCoE L2 client */
        BUG_ON(fp->max_cos != 1);

        bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
                             &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
                             bnx2x_sp_mapping(bp, q_rdata), q_type);

        DP(NETIF_MSG_IFUP,
           "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
           fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
           fp->igu_sb_id);
}

static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
                                       struct bnx2x_fp_txdata *txdata)
{
        int cnt = 1000;

        while (bnx2x_has_tx_work_unload(txdata)) {
                if (!cnt) {
                        BNX2X_ERR("timeout waiting for queue[%d]: txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n",
                                  txdata->txq_index, txdata->tx_pkt_prod,
                                  txdata->tx_pkt_cons);
#ifdef BNX2X_STOP_ON_ERROR
                        bnx2x_panic();
                        return -EBUSY;
#else
                        break;
#endif
                }
                cnt--;
                usleep_range(1000, 1000);
        }

        return 0;
}

int bnx2x_get_link_cfg_idx(struct bnx2x *bp);

static inline void __storm_memset_struct(struct bnx2x *bp,
                                         u32 addr, size_t size, u32 *data)
{
        int i;
        for (i = 0; i < size/4; i++)
                REG_WR(bp, addr + (i * 4), data[i]);
}
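/* Usage sketch (the storm RAM address here is hypothetical): like the
 * producer loop in bnx2x_update_rx_prod(), this helper copies a host
 * structure into storm memory one u32 word at a time:
 *
 *        struct ustorm_eth_rx_producers rx_prods = {0};
 *        __storm_memset_struct(bp, addr, sizeof(rx_prods),
 *                              (u32 *)&rx_prods);
 */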
/**
 * bnx2x_wait_sp_comp - wait for the outstanding SP commands.
 *
 * @bp: driver handle
 * @mask: bits that need to be cleared
 */
static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
{
        int tout = 5000; /* Wait for 5 secs tops */

        while (tout--) {
                smp_mb();
                netif_addr_lock_bh(bp->dev);
                if (!(bp->sp_state & mask)) {
                        netif_addr_unlock_bh(bp->dev);
                        return true;
                }
                netif_addr_unlock_bh(bp->dev);

                usleep_range(1000, 1000);
        }

        smp_mb();

        netif_addr_lock_bh(bp->dev);
        if (bp->sp_state & mask) {
                BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, mask 0x%lx\n",
                          bp->sp_state, mask);
                netif_addr_unlock_bh(bp->dev);
                return false;
        }
        netif_addr_unlock_bh(bp->dev);

        return true;
}

/**
 * bnx2x_set_ctx_validation - set CDU context validation values
 *
 * @bp: driver handle
 * @cxt: context of the connection on the host memory
 * @cid: SW CID of the connection to be configured
 */
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
                              u32 cid);

void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
                                    u8 sb_index, u8 disable, u16 usec);
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);

/**
 * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration.
 *
 * @bp: driver handle
 * @mf_cfg: MF configuration
 */
static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
{
        u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
                              FUNC_MF_CFG_MAX_BW_SHIFT;
        if (!max_cfg) {
                DP(NETIF_MSG_IFUP | BNX2X_MSG_ETHTOOL,
                   "Max BW configured to 0 - using 100 instead\n");
                max_cfg = 100;
        }
        return max_cfg;
}

/* checks if HW supports GRO for given MTU */
static inline bool bnx2x_mtu_allows_gro(int mtu)
{
        /* gro frags per page */
        int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE);

        /*
         * 1. number of frags should not grow above MAX_SKB_FRAGS
         * 2. frag must fit the page
         */
        return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
}
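/* Reasoning sketch for bnx2x_mtu_allows_gro(): aggregated frames are
 * built from per-page fragments, so an MTU-sized frag must fit in a
 * single SGE page, and a full SGL (U_ETH_SGL_SIZE entries at fpp frags
 * each) must not overflow the MAX_SKB_FRAGS slots of the skb that is
 * handed to the stack.
 */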
/**
 * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
 *
 * @bp: driver handle
 */
void bnx2x_get_iscsi_info(struct bnx2x *bp);

/**
 * bnx2x_link_sync_notify - send notification to other functions.
 *
 * @bp: driver handle
 */
static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
{
        int func;
        int vn;

        /* Set the attention towards other drivers on the same port */
        for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
                if (vn == BP_VN(bp))
                        continue;

                func = func_by_vn(bp, vn);
                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
                       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
        }
}

/**
 * bnx2x_update_drv_flags - update flags in shmem
 *
 * @bp: driver handle
 * @flags: flags to update
 * @set: set or clear
 */
static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
{
        if (SHMEM2_HAS(bp, drv_flags)) {
                u32 drv_flags;
                bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
                drv_flags = SHMEM2_RD(bp, drv_flags);

                if (set)
                        SET_FLAGS(drv_flags, flags);
                else
                        RESET_FLAGS(drv_flags, flags);

                SHMEM2_WR(bp, drv_flags, drv_flags);
                DP(NETIF_MSG_IFUP, "drv_flags 0x%08x\n", drv_flags);
                bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
        }
}

static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
{
        if (is_valid_ether_addr(addr) ||
            (is_zero_ether_addr(addr) &&
             (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))))
                return true;

        return false;
}

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp: driver handle
 * @buf: character buffer to fill with the fw name
 * @buf_len: length of the above buffer
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);
#endif /* BNX2X_CMN_H */