bnx2x_cmn.c revision 614c76df1d1224dc2eee8678fab6e0b95b49b7da
/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/firmware.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

/**
 * bnx2x_bz_fp - zero content of the fastpath structure.
 *
 * @bp: driver handle
 * @index: fastpath index to be zeroed
 *
 * Makes sure the contents of the bp->fp[index].napi is kept
 * intact.
 */
static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	struct napi_struct orig_napi = fp->napi;
	/* bzero bnx2x_fastpath contents */
	memset(fp, 0, sizeof(*fp));

	/* Restore the NAPI object as it has been already initialized */
	fp->napi = orig_napi;

	fp->bp = bp;
	fp->index = index;
	if (IS_ETH_FP(fp))
		fp->max_cos = bp->max_cos;
	else
		/* Special queues support only one CoS */
		fp->max_cos = 1;

	/*
	 * set the tpa flag for each queue. The tpa flag determines the queue
	 * minimal size so it must be set prior to queue memory allocation
	 */
	fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);

#ifdef BCM_CNIC
	/* We don't want TPA on an FCoE L2 ring */
	if (IS_FCOE_FP(fp))
		fp->disable_tpa = 1;
#endif
}

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp: driver handle
 * @from: source FP index
 * @to: destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi is kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;
}

int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (skb) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}
	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
		   " pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent reenabling the queue
		 * while it's empty.
		 * This could have happened if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

/* Set Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe)
{
	/* Set Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
		return le32_to_cpu(cqe->rss_hash_result);
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 * nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN 12
/**
 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
 *
 * @bp: driver handle
 * @parsing_flags: parsing flags from the START CQE
 * @len_on_bd: total length of the first packet for the
 *             aggregation.
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 */
static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
				    u16 len_on_bd)
{
	/*
	 * TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6)
		hdrs_len += sizeof(struct ipv6hdr);
	else /* IPv4 */
		hdrs_len += sizeof(struct iphdr);

	/* Check if there was a TCP timestamp; if there is, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	return len_on_bd - hdrs_len;
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       u16 queue, struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	u16 len_on_bd = tpa_info->len_on_bd;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
					tpa_info->parsing_flags, len_on_bd);

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we're going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u32 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *data = rx_buf->data;
	/* alloc new skb */
	u8 *new_data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data);

	if (likely(skb)) {

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb->rxhash = tpa_info->rxhash;

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
			napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}

drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	fp->eth_q_stats.rx_skb_alloc_failed++;
}

int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		struct eth_fast_path_rx_cqe *cqe_fp;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad;
		u8 *data;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));

		/* is this a slowpath msg?
*/ 633 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) { 634 bnx2x_sp_event(fp, cqe); 635 goto next_cqe; 636 } 637 rx_buf = &fp->rx_buf_ring[bd_cons]; 638 data = rx_buf->data; 639 640 if (!CQE_TYPE_FAST(cqe_fp_type)) { 641#ifdef BNX2X_STOP_ON_ERROR 642 /* sanity check */ 643 if (fp->disable_tpa && 644 (CQE_TYPE_START(cqe_fp_type) || 645 CQE_TYPE_STOP(cqe_fp_type))) 646 BNX2X_ERR("START/STOP packet while " 647 "disable_tpa type %x\n", 648 CQE_TYPE(cqe_fp_type)); 649#endif 650 651 if (CQE_TYPE_START(cqe_fp_type)) { 652 u16 queue = cqe_fp->queue_index; 653 DP(NETIF_MSG_RX_STATUS, 654 "calling tpa_start on queue %d\n", 655 queue); 656 657 bnx2x_tpa_start(fp, queue, 658 bd_cons, bd_prod, 659 cqe_fp); 660 goto next_rx; 661 } else { 662 u16 queue = 663 cqe->end_agg_cqe.queue_index; 664 DP(NETIF_MSG_RX_STATUS, 665 "calling tpa_stop on queue %d\n", 666 queue); 667 668 bnx2x_tpa_stop(bp, fp, queue, 669 &cqe->end_agg_cqe, 670 comp_ring_cons); 671#ifdef BNX2X_STOP_ON_ERROR 672 if (bp->panic) 673 return 0; 674#endif 675 676 bnx2x_update_sge_prod(fp, cqe_fp); 677 goto next_cqe; 678 } 679 } 680 /* non TPA */ 681 len = le16_to_cpu(cqe_fp->pkt_len); 682 pad = cqe_fp->placement_offset; 683 dma_sync_single_for_cpu(&bp->pdev->dev, 684 dma_unmap_addr(rx_buf, mapping), 685 pad + RX_COPY_THRESH, 686 DMA_FROM_DEVICE); 687 pad += NET_SKB_PAD; 688 prefetch(data + pad); /* speedup eth_type_trans() */ 689 /* is this an error packet? */ 690 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) { 691 DP(NETIF_MSG_RX_ERR, 692 "ERROR flags %x rx packet %u\n", 693 cqe_fp_flags, sw_comp_cons); 694 fp->eth_q_stats.rx_err_discard_pkt++; 695 goto reuse_rx; 696 } 697 698 /* Since we don't have a jumbo ring 699 * copy small packets if mtu > 1500 700 */ 701 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) && 702 (len <= RX_COPY_THRESH)) { 703 skb = netdev_alloc_skb_ip_align(bp->dev, len); 704 if (skb == NULL) { 705 DP(NETIF_MSG_RX_ERR, 706 "ERROR packet dropped because of alloc failure\n"); 707 fp->eth_q_stats.rx_skb_alloc_failed++; 708 goto reuse_rx; 709 } 710 memcpy(skb->data, data + pad, len); 711 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod); 712 } else { 713 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) { 714 dma_unmap_single(&bp->pdev->dev, 715 dma_unmap_addr(rx_buf, mapping), 716 fp->rx_buf_size, 717 DMA_FROM_DEVICE); 718 skb = build_skb(data); 719 if (unlikely(!skb)) { 720 kfree(data); 721 fp->eth_q_stats.rx_skb_alloc_failed++; 722 goto next_rx; 723 } 724 skb_reserve(skb, pad); 725 } else { 726 DP(NETIF_MSG_RX_ERR, 727 "ERROR packet dropped because " 728 "of alloc failure\n"); 729 fp->eth_q_stats.rx_skb_alloc_failed++; 730reuse_rx: 731 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod); 732 goto next_rx; 733 } 734 735 skb_put(skb, len); 736 skb->protocol = eth_type_trans(skb, bp->dev); 737 738 /* Set Toeplitz hash for a none-LRO skb */ 739 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp); 740 741 skb_checksum_none_assert(skb); 742 743 if (bp->dev->features & NETIF_F_RXCSUM) { 744 745 if (likely(BNX2X_RX_CSUM_OK(cqe))) 746 skb->ip_summed = CHECKSUM_UNNECESSARY; 747 else 748 fp->eth_q_stats.hw_csum_err++; 749 } 750 } 751 752 skb_record_rx_queue(skb, fp->rx_queue); 753 754 if (le16_to_cpu(cqe_fp->pars_flags.flags) & 755 PARSING_FLAGS_VLAN) 756 __vlan_hwaccel_put_tag(skb, 757 le16_to_cpu(cqe_fp->vlan_tag)); 758 napi_gro_receive(&fp->napi, skb); 759 760 761next_rx: 762 rx_buf->data = NULL; 763 764 bd_cons = NEXT_RX_IDX(bd_cons); 765 bd_prod = NEXT_RX_IDX(bd_prod); 766 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw); 767 rx_pkt++; 768next_cqe: 769 sw_comp_prod = 
NEXT_RCQ_IDX(sw_comp_prod); 770 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons); 771 772 if (rx_pkt == budget) 773 break; 774 } /* while */ 775 776 fp->rx_bd_cons = bd_cons; 777 fp->rx_bd_prod = bd_prod_fw; 778 fp->rx_comp_cons = sw_comp_cons; 779 fp->rx_comp_prod = sw_comp_prod; 780 781 /* Update producers */ 782 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod, 783 fp->rx_sge_prod); 784 785 fp->rx_pkt += rx_pkt; 786 fp->rx_calls++; 787 788 return rx_pkt; 789} 790 791static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) 792{ 793 struct bnx2x_fastpath *fp = fp_cookie; 794 struct bnx2x *bp = fp->bp; 795 u8 cos; 796 797 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB " 798 "[fp %d fw_sd %d igusb %d]\n", 799 fp->index, fp->fw_sb_id, fp->igu_sb_id); 800 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 801 802#ifdef BNX2X_STOP_ON_ERROR 803 if (unlikely(bp->panic)) 804 return IRQ_HANDLED; 805#endif 806 807 /* Handle Rx and Tx according to MSI-X vector */ 808 prefetch(fp->rx_cons_sb); 809 810 for_each_cos_in_tx_queue(fp, cos) 811 prefetch(fp->txdata[cos].tx_cons_sb); 812 813 prefetch(&fp->sb_running_index[SM_RX_ID]); 814 napi_schedule(&bnx2x_fp(bp, fp->index, napi)); 815 816 return IRQ_HANDLED; 817} 818 819/* HW Lock for shared dual port PHYs */ 820void bnx2x_acquire_phy_lock(struct bnx2x *bp) 821{ 822 mutex_lock(&bp->port.phy_mutex); 823 824 if (bp->port.need_hw_lock) 825 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); 826} 827 828void bnx2x_release_phy_lock(struct bnx2x *bp) 829{ 830 if (bp->port.need_hw_lock) 831 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO); 832 833 mutex_unlock(&bp->port.phy_mutex); 834} 835 836/* calculates MF speed according to current linespeed and MF configuration */ 837u16 bnx2x_get_mf_speed(struct bnx2x *bp) 838{ 839 u16 line_speed = bp->link_vars.line_speed; 840 if (IS_MF(bp)) { 841 u16 maxCfg = bnx2x_extract_max_cfg(bp, 842 bp->mf_config[BP_VN(bp)]); 843 844 /* Calculate the current MAX line speed limit for the MF 845 * devices 846 */ 847 if (IS_MF_SI(bp)) 848 line_speed = (line_speed * maxCfg) / 100; 849 else { /* SD mode */ 850 u16 vn_max_rate = maxCfg * 100; 851 852 if (vn_max_rate < line_speed) 853 line_speed = vn_max_rate; 854 } 855 } 856 857 return line_speed; 858} 859 860/** 861 * bnx2x_fill_report_data - fill link report data to report 862 * 863 * @bp: driver handle 864 * @data: link state to update 865 * 866 * It uses a none-atomic bit operations because is called under the mutex. 867 */ 868static inline void bnx2x_fill_report_data(struct bnx2x *bp, 869 struct bnx2x_link_report_data *data) 870{ 871 u16 line_speed = bnx2x_get_mf_speed(bp); 872 873 memset(data, 0, sizeof(*data)); 874 875 /* Fill the report data: efective line speed */ 876 data->line_speed = line_speed; 877 878 /* Link is down */ 879 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS)) 880 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, 881 &data->link_report_flags); 882 883 /* Full DUPLEX */ 884 if (bp->link_vars.duplex == DUPLEX_FULL) 885 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags); 886 887 /* Rx Flow Control is ON */ 888 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) 889 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags); 890 891 /* Tx Flow Control is ON */ 892 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) 893 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags); 894} 895 896/** 897 * bnx2x_link_report - report link status to OS. 
 *
 * @bp: driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */

void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}

/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp: driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (!CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
967 */ 968 if (cur_data.link_report_flags) { 969 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON, 970 &cur_data.link_report_flags)) { 971 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON, 972 &cur_data.link_report_flags)) 973 flow = "ON - receive & transmit"; 974 else 975 flow = "ON - receive"; 976 } else { 977 flow = "ON - transmit"; 978 } 979 } else { 980 flow = "none"; 981 } 982 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", 983 cur_data.line_speed, duplex, flow); 984 } 985} 986 987void bnx2x_init_rx_rings(struct bnx2x *bp) 988{ 989 int func = BP_FUNC(bp); 990 u16 ring_prod; 991 int i, j; 992 993 /* Allocate TPA resources */ 994 for_each_rx_queue(bp, j) { 995 struct bnx2x_fastpath *fp = &bp->fp[j]; 996 997 DP(NETIF_MSG_IFUP, 998 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); 999 1000 if (!fp->disable_tpa) { 1001 /* Fill the per-aggregtion pool */ 1002 for (i = 0; i < MAX_AGG_QS(bp); i++) { 1003 struct bnx2x_agg_info *tpa_info = 1004 &fp->tpa_info[i]; 1005 struct sw_rx_bd *first_buf = 1006 &tpa_info->first_buf; 1007 1008 first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, 1009 GFP_ATOMIC); 1010 if (!first_buf->data) { 1011 BNX2X_ERR("Failed to allocate TPA " 1012 "skb pool for queue[%d] - " 1013 "disabling TPA on this " 1014 "queue!\n", j); 1015 bnx2x_free_tpa_pool(bp, fp, i); 1016 fp->disable_tpa = 1; 1017 break; 1018 } 1019 dma_unmap_addr_set(first_buf, mapping, 0); 1020 tpa_info->tpa_state = BNX2X_TPA_STOP; 1021 } 1022 1023 /* "next page" elements initialization */ 1024 bnx2x_set_next_page_sgl(fp); 1025 1026 /* set SGEs bit mask */ 1027 bnx2x_init_sge_ring_bit_mask(fp); 1028 1029 /* Allocate SGEs and initialize the ring elements */ 1030 for (i = 0, ring_prod = 0; 1031 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) { 1032 1033 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) { 1034 BNX2X_ERR("was only able to allocate " 1035 "%d rx sges\n", i); 1036 BNX2X_ERR("disabling TPA for " 1037 "queue[%d]\n", j); 1038 /* Cleanup already allocated elements */ 1039 bnx2x_free_rx_sge_range(bp, fp, 1040 ring_prod); 1041 bnx2x_free_tpa_pool(bp, fp, 1042 MAX_AGG_QS(bp)); 1043 fp->disable_tpa = 1; 1044 ring_prod = 0; 1045 break; 1046 } 1047 ring_prod = NEXT_SGE_IDX(ring_prod); 1048 } 1049 1050 fp->rx_sge_prod = ring_prod; 1051 } 1052 } 1053 1054 for_each_rx_queue(bp, j) { 1055 struct bnx2x_fastpath *fp = &bp->fp[j]; 1056 1057 fp->rx_bd_cons = 0; 1058 1059 /* Activate BD ring */ 1060 /* Warning! 
1061 * this will generate an interrupt (to the TSTORM) 1062 * must only be done after chip is initialized 1063 */ 1064 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, 1065 fp->rx_sge_prod); 1066 1067 if (j != 0) 1068 continue; 1069 1070 if (CHIP_IS_E1(bp)) { 1071 REG_WR(bp, BAR_USTRORM_INTMEM + 1072 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func), 1073 U64_LO(fp->rx_comp_mapping)); 1074 REG_WR(bp, BAR_USTRORM_INTMEM + 1075 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4, 1076 U64_HI(fp->rx_comp_mapping)); 1077 } 1078 } 1079} 1080 1081static void bnx2x_free_tx_skbs(struct bnx2x *bp) 1082{ 1083 int i; 1084 u8 cos; 1085 1086 for_each_tx_queue(bp, i) { 1087 struct bnx2x_fastpath *fp = &bp->fp[i]; 1088 for_each_cos_in_tx_queue(fp, cos) { 1089 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; 1090 unsigned pkts_compl = 0, bytes_compl = 0; 1091 1092 u16 sw_prod = txdata->tx_pkt_prod; 1093 u16 sw_cons = txdata->tx_pkt_cons; 1094 1095 while (sw_cons != sw_prod) { 1096 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons), 1097 &pkts_compl, &bytes_compl); 1098 sw_cons++; 1099 } 1100 netdev_tx_reset_queue( 1101 netdev_get_tx_queue(bp->dev, txdata->txq_index)); 1102 } 1103 } 1104} 1105 1106static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp) 1107{ 1108 struct bnx2x *bp = fp->bp; 1109 int i; 1110 1111 /* ring wasn't allocated */ 1112 if (fp->rx_buf_ring == NULL) 1113 return; 1114 1115 for (i = 0; i < NUM_RX_BD; i++) { 1116 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i]; 1117 u8 *data = rx_buf->data; 1118 1119 if (data == NULL) 1120 continue; 1121 dma_unmap_single(&bp->pdev->dev, 1122 dma_unmap_addr(rx_buf, mapping), 1123 fp->rx_buf_size, DMA_FROM_DEVICE); 1124 1125 rx_buf->data = NULL; 1126 kfree(data); 1127 } 1128} 1129 1130static void bnx2x_free_rx_skbs(struct bnx2x *bp) 1131{ 1132 int j; 1133 1134 for_each_rx_queue(bp, j) { 1135 struct bnx2x_fastpath *fp = &bp->fp[j]; 1136 1137 bnx2x_free_rx_bds(fp); 1138 1139 if (!fp->disable_tpa) 1140 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp)); 1141 } 1142} 1143 1144void bnx2x_free_skbs(struct bnx2x *bp) 1145{ 1146 bnx2x_free_tx_skbs(bp); 1147 bnx2x_free_rx_skbs(bp); 1148} 1149 1150void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value) 1151{ 1152 /* load old values */ 1153 u32 mf_cfg = bp->mf_config[BP_VN(bp)]; 1154 1155 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) { 1156 /* leave all but MAX value */ 1157 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK; 1158 1159 /* set new MAX value */ 1160 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT) 1161 & FUNC_MF_CFG_MAX_BW_MASK; 1162 1163 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg); 1164 } 1165} 1166 1167/** 1168 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors 1169 * 1170 * @bp: driver handle 1171 * @nvecs: number of vectors to be released 1172 */ 1173static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs) 1174{ 1175 int i, offset = 0; 1176 1177 if (nvecs == offset) 1178 return; 1179 free_irq(bp->msix_table[offset].vector, bp->dev); 1180 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", 1181 bp->msix_table[offset].vector); 1182 offset++; 1183#ifdef BCM_CNIC 1184 if (nvecs == offset) 1185 return; 1186 offset++; 1187#endif 1188 1189 for_each_eth_queue(bp, i) { 1190 if (nvecs == offset) 1191 return; 1192 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d " 1193 "irq\n", i, bp->msix_table[offset].vector); 1194 1195 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]); 1196 } 1197} 1198 1199void bnx2x_free_irq(struct bnx2x *bp) 1200{ 1201 if (bp->flags & USING_MSIX_FLAG) 1202 
bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) + 1203 CNIC_PRESENT + 1); 1204 else if (bp->flags & USING_MSI_FLAG) 1205 free_irq(bp->pdev->irq, bp->dev); 1206 else 1207 free_irq(bp->pdev->irq, bp->dev); 1208} 1209 1210int bnx2x_enable_msix(struct bnx2x *bp) 1211{ 1212 int msix_vec = 0, i, rc, req_cnt; 1213 1214 bp->msix_table[msix_vec].entry = msix_vec; 1215 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", 1216 bp->msix_table[0].entry); 1217 msix_vec++; 1218 1219#ifdef BCM_CNIC 1220 bp->msix_table[msix_vec].entry = msix_vec; 1221 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n", 1222 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry); 1223 msix_vec++; 1224#endif 1225 /* We need separate vectors for ETH queues only (not FCoE) */ 1226 for_each_eth_queue(bp, i) { 1227 bp->msix_table[msix_vec].entry = msix_vec; 1228 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d " 1229 "(fastpath #%u)\n", msix_vec, msix_vec, i); 1230 msix_vec++; 1231 } 1232 1233 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1; 1234 1235 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt); 1236 1237 /* 1238 * reconfigure number of tx/rx queues according to available 1239 * MSI-X vectors 1240 */ 1241 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) { 1242 /* how less vectors we will have? */ 1243 int diff = req_cnt - rc; 1244 1245 DP(NETIF_MSG_IFUP, 1246 "Trying to use less MSI-X vectors: %d\n", rc); 1247 1248 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc); 1249 1250 if (rc) { 1251 DP(NETIF_MSG_IFUP, 1252 "MSI-X is not attainable rc %d\n", rc); 1253 return rc; 1254 } 1255 /* 1256 * decrease number of queues by number of unallocated entries 1257 */ 1258 bp->num_queues -= diff; 1259 1260 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n", 1261 bp->num_queues); 1262 } else if (rc) { 1263 /* fall to INTx if not enough memory */ 1264 if (rc == -ENOMEM) 1265 bp->flags |= DISABLE_MSI_FLAG; 1266 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc); 1267 return rc; 1268 } 1269 1270 bp->flags |= USING_MSIX_FLAG; 1271 1272 return 0; 1273} 1274 1275static int bnx2x_req_msix_irqs(struct bnx2x *bp) 1276{ 1277 int i, rc, offset = 0; 1278 1279 rc = request_irq(bp->msix_table[offset++].vector, 1280 bnx2x_msix_sp_int, 0, 1281 bp->dev->name, bp->dev); 1282 if (rc) { 1283 BNX2X_ERR("request sp irq failed\n"); 1284 return -EBUSY; 1285 } 1286 1287#ifdef BCM_CNIC 1288 offset++; 1289#endif 1290 for_each_eth_queue(bp, i) { 1291 struct bnx2x_fastpath *fp = &bp->fp[i]; 1292 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", 1293 bp->dev->name, i); 1294 1295 rc = request_irq(bp->msix_table[offset].vector, 1296 bnx2x_msix_fp_int, 0, fp->name, fp); 1297 if (rc) { 1298 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i, 1299 bp->msix_table[offset].vector, rc); 1300 bnx2x_free_msix_irqs(bp, offset); 1301 return -EBUSY; 1302 } 1303 1304 offset++; 1305 } 1306 1307 i = BNX2X_NUM_ETH_QUEUES(bp); 1308 offset = 1 + CNIC_PRESENT; 1309 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d" 1310 " ... 
fp[%d] %d\n", 1311 bp->msix_table[0].vector, 1312 0, bp->msix_table[offset].vector, 1313 i - 1, bp->msix_table[offset + i - 1].vector); 1314 1315 return 0; 1316} 1317 1318int bnx2x_enable_msi(struct bnx2x *bp) 1319{ 1320 int rc; 1321 1322 rc = pci_enable_msi(bp->pdev); 1323 if (rc) { 1324 DP(NETIF_MSG_IFUP, "MSI is not attainable\n"); 1325 return -1; 1326 } 1327 bp->flags |= USING_MSI_FLAG; 1328 1329 return 0; 1330} 1331 1332static int bnx2x_req_irq(struct bnx2x *bp) 1333{ 1334 unsigned long flags; 1335 int rc; 1336 1337 if (bp->flags & USING_MSI_FLAG) 1338 flags = 0; 1339 else 1340 flags = IRQF_SHARED; 1341 1342 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags, 1343 bp->dev->name, bp->dev); 1344 return rc; 1345} 1346 1347static inline int bnx2x_setup_irqs(struct bnx2x *bp) 1348{ 1349 int rc = 0; 1350 if (bp->flags & USING_MSIX_FLAG) { 1351 rc = bnx2x_req_msix_irqs(bp); 1352 if (rc) 1353 return rc; 1354 } else { 1355 bnx2x_ack_int(bp); 1356 rc = bnx2x_req_irq(bp); 1357 if (rc) { 1358 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc); 1359 return rc; 1360 } 1361 if (bp->flags & USING_MSI_FLAG) { 1362 bp->dev->irq = bp->pdev->irq; 1363 netdev_info(bp->dev, "using MSI IRQ %d\n", 1364 bp->pdev->irq); 1365 } 1366 } 1367 1368 return 0; 1369} 1370 1371static inline void bnx2x_napi_enable(struct bnx2x *bp) 1372{ 1373 int i; 1374 1375 for_each_rx_queue(bp, i) 1376 napi_enable(&bnx2x_fp(bp, i, napi)); 1377} 1378 1379static inline void bnx2x_napi_disable(struct bnx2x *bp) 1380{ 1381 int i; 1382 1383 for_each_rx_queue(bp, i) 1384 napi_disable(&bnx2x_fp(bp, i, napi)); 1385} 1386 1387void bnx2x_netif_start(struct bnx2x *bp) 1388{ 1389 if (netif_running(bp->dev)) { 1390 bnx2x_napi_enable(bp); 1391 bnx2x_int_enable(bp); 1392 if (bp->state == BNX2X_STATE_OPEN) 1393 netif_tx_wake_all_queues(bp->dev); 1394 } 1395} 1396 1397void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) 1398{ 1399 bnx2x_int_disable_sync(bp, disable_hw); 1400 bnx2x_napi_disable(bp); 1401} 1402 1403u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) 1404{ 1405 struct bnx2x *bp = netdev_priv(dev); 1406 1407#ifdef BCM_CNIC 1408 if (!NO_FCOE(bp)) { 1409 struct ethhdr *hdr = (struct ethhdr *)skb->data; 1410 u16 ether_type = ntohs(hdr->h_proto); 1411 1412 /* Skip VLAN tag if present */ 1413 if (ether_type == ETH_P_8021Q) { 1414 struct vlan_ethhdr *vhdr = 1415 (struct vlan_ethhdr *)skb->data; 1416 1417 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto); 1418 } 1419 1420 /* If ethertype is FCoE or FIP - use FCoE ring */ 1421 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP)) 1422 return bnx2x_fcoe_tx(bp, txq_index); 1423 } 1424#endif 1425 /* select a non-FCoE queue */ 1426 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp)); 1427} 1428 1429void bnx2x_set_num_queues(struct bnx2x *bp) 1430{ 1431 switch (bp->multi_mode) { 1432 case ETH_RSS_MODE_DISABLED: 1433 bp->num_queues = 1; 1434 break; 1435 case ETH_RSS_MODE_REGULAR: 1436 bp->num_queues = bnx2x_calc_num_queues(bp); 1437 break; 1438 1439 default: 1440 bp->num_queues = 1; 1441 break; 1442 } 1443 1444#ifdef BCM_CNIC 1445 /* override in ISCSI SD mod */ 1446 if (IS_MF_ISCSI_SD(bp)) 1447 bp->num_queues = 1; 1448#endif 1449 /* Add special queues */ 1450 bp->num_queues += NON_ETH_CONTEXT_USE; 1451} 1452 1453/** 1454 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues 1455 * 1456 * @bp: Driver handle 1457 * 1458 * We currently support for at most 16 Tx queues for each CoS thus we will 1459 * allocate a multiple of 16 for ETH L2 rings 
according to the value of the 1460 * bp->max_cos. 1461 * 1462 * If there is an FCoE L2 queue the appropriate Tx queue will have the next 1463 * index after all ETH L2 indices. 1464 * 1465 * If the actual number of Tx queues (for each CoS) is less than 16 then there 1466 * will be the holes at the end of each group of 16 ETh L2 indices (0..15, 1467 * 16..31,...) with indicies that are not coupled with any real Tx queue. 1468 * 1469 * The proper configuration of skb->queue_mapping is handled by 1470 * bnx2x_select_queue() and __skb_tx_hash(). 1471 * 1472 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash() 1473 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0). 1474 */ 1475static inline int bnx2x_set_real_num_queues(struct bnx2x *bp) 1476{ 1477 int rc, tx, rx; 1478 1479 tx = MAX_TXQS_PER_COS * bp->max_cos; 1480 rx = BNX2X_NUM_ETH_QUEUES(bp); 1481 1482/* account for fcoe queue */ 1483#ifdef BCM_CNIC 1484 if (!NO_FCOE(bp)) { 1485 rx += FCOE_PRESENT; 1486 tx += FCOE_PRESENT; 1487 } 1488#endif 1489 1490 rc = netif_set_real_num_tx_queues(bp->dev, tx); 1491 if (rc) { 1492 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc); 1493 return rc; 1494 } 1495 rc = netif_set_real_num_rx_queues(bp->dev, rx); 1496 if (rc) { 1497 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc); 1498 return rc; 1499 } 1500 1501 DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n", 1502 tx, rx); 1503 1504 return rc; 1505} 1506 1507static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp) 1508{ 1509 int i; 1510 1511 for_each_queue(bp, i) { 1512 struct bnx2x_fastpath *fp = &bp->fp[i]; 1513 u32 mtu; 1514 1515 /* Always use a mini-jumbo MTU for the FCoE L2 ring */ 1516 if (IS_FCOE_IDX(i)) 1517 /* 1518 * Although there are no IP frames expected to arrive to 1519 * this ring we still want to add an 1520 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer 1521 * overrun attack. 1522 */ 1523 mtu = BNX2X_FCOE_MINI_JUMBO_MTU; 1524 else 1525 mtu = bp->dev->mtu; 1526 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START + 1527 IP_HEADER_ALIGNMENT_PADDING + 1528 ETH_OVREHEAD + 1529 mtu + 1530 BNX2X_FW_RX_ALIGN_END; 1531 /* Note : rx_buf_size doesnt take into account NET_SKB_PAD */ 1532 } 1533} 1534 1535static inline int bnx2x_init_rss_pf(struct bnx2x *bp) 1536{ 1537 int i; 1538 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; 1539 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); 1540 1541 /* 1542 * Prepare the inital contents fo the indirection table if RSS is 1543 * enabled 1544 */ 1545 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) { 1546 for (i = 0; i < sizeof(ind_table); i++) 1547 ind_table[i] = 1548 bp->fp->cl_id + (i % num_eth_queues); 1549 } 1550 1551 /* 1552 * For 57710 and 57711 SEARCHER configuration (rss_keys) is 1553 * per-port, so if explicit configuration is needed , do it only 1554 * for a PMF. 1555 * 1556 * For 57712 and newer on the other hand it's a per-function 1557 * configuration. 1558 */ 1559 return bnx2x_config_rss_pf(bp, ind_table, 1560 bp->port.pmf || !CHIP_IS_E1x(bp)); 1561} 1562 1563int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash) 1564{ 1565 struct bnx2x_config_rss_params params = {0}; 1566 int i; 1567 1568 /* Although RSS is meaningless when there is a single HW queue we 1569 * still need it enabled in order to have HW Rx hash generated. 
1570 * 1571 * if (!is_eth_multi(bp)) 1572 * bp->multi_mode = ETH_RSS_MODE_DISABLED; 1573 */ 1574 1575 params.rss_obj = &bp->rss_conf_obj; 1576 1577 __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags); 1578 1579 /* RSS mode */ 1580 switch (bp->multi_mode) { 1581 case ETH_RSS_MODE_DISABLED: 1582 __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags); 1583 break; 1584 case ETH_RSS_MODE_REGULAR: 1585 __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags); 1586 break; 1587 case ETH_RSS_MODE_VLAN_PRI: 1588 __set_bit(BNX2X_RSS_MODE_VLAN_PRI, ¶ms.rss_flags); 1589 break; 1590 case ETH_RSS_MODE_E1HOV_PRI: 1591 __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, ¶ms.rss_flags); 1592 break; 1593 case ETH_RSS_MODE_IP_DSCP: 1594 __set_bit(BNX2X_RSS_MODE_IP_DSCP, ¶ms.rss_flags); 1595 break; 1596 default: 1597 BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode); 1598 return -EINVAL; 1599 } 1600 1601 /* If RSS is enabled */ 1602 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) { 1603 /* RSS configuration */ 1604 __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags); 1605 __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags); 1606 __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags); 1607 __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags); 1608 1609 /* Hash bits */ 1610 params.rss_result_mask = MULTI_MASK; 1611 1612 memcpy(params.ind_table, ind_table, sizeof(params.ind_table)); 1613 1614 if (config_hash) { 1615 /* RSS keys */ 1616 for (i = 0; i < sizeof(params.rss_key) / 4; i++) 1617 params.rss_key[i] = random32(); 1618 1619 __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags); 1620 } 1621 } 1622 1623 return bnx2x_config_rss(bp, ¶ms); 1624} 1625 1626static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) 1627{ 1628 struct bnx2x_func_state_params func_params = {0}; 1629 1630 /* Prepare parameters for function state transitions */ 1631 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); 1632 1633 func_params.f_obj = &bp->func_obj; 1634 func_params.cmd = BNX2X_F_CMD_HW_INIT; 1635 1636 func_params.params.hw_init.load_phase = load_code; 1637 1638 return bnx2x_func_state_change(bp, &func_params); 1639} 1640 1641/* 1642 * Cleans the object that have internal lists without sending 1643 * ramrods. Should be run when interrutps are disabled. 1644 */ 1645static void bnx2x_squeeze_objects(struct bnx2x *bp) 1646{ 1647 int rc; 1648 unsigned long ramrod_flags = 0, vlan_mac_flags = 0; 1649 struct bnx2x_mcast_ramrod_params rparam = {0}; 1650 struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj; 1651 1652 /***************** Cleanup MACs' object first *************************/ 1653 1654 /* Wait for completion of requested */ 1655 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 1656 /* Perform a dry cleanup */ 1657 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags); 1658 1659 /* Clean ETH primary MAC */ 1660 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags); 1661 rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags, 1662 &ramrod_flags); 1663 if (rc != 0) 1664 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc); 1665 1666 /* Cleanup UC list */ 1667 vlan_mac_flags = 0; 1668 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags); 1669 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, 1670 &ramrod_flags); 1671 if (rc != 0) 1672 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc); 1673 1674 /***************** Now clean mcast object *****************************/ 1675 rparam.mcast_obj = &bp->mcast_obj; 1676 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags); 1677 1678 /* Add a DEL command... 
*/ 1679 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); 1680 if (rc < 0) 1681 BNX2X_ERR("Failed to add a new DEL command to a multi-cast " 1682 "object: %d\n", rc); 1683 1684 /* ...and wait until all pending commands are cleared */ 1685 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); 1686 while (rc != 0) { 1687 if (rc < 0) { 1688 BNX2X_ERR("Failed to clean multi-cast object: %d\n", 1689 rc); 1690 return; 1691 } 1692 1693 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); 1694 } 1695} 1696 1697#ifndef BNX2X_STOP_ON_ERROR 1698#define LOAD_ERROR_EXIT(bp, label) \ 1699 do { \ 1700 (bp)->state = BNX2X_STATE_ERROR; \ 1701 goto label; \ 1702 } while (0) 1703#else 1704#define LOAD_ERROR_EXIT(bp, label) \ 1705 do { \ 1706 (bp)->state = BNX2X_STATE_ERROR; \ 1707 (bp)->panic = 1; \ 1708 return -EBUSY; \ 1709 } while (0) 1710#endif 1711 1712/* must be called with rtnl_lock */ 1713int bnx2x_nic_load(struct bnx2x *bp, int load_mode) 1714{ 1715 int port = BP_PORT(bp); 1716 u32 load_code; 1717 int i, rc; 1718 1719#ifdef BNX2X_STOP_ON_ERROR 1720 if (unlikely(bp->panic)) 1721 return -EPERM; 1722#endif 1723 1724 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; 1725 1726 /* Set the initial link reported state to link down */ 1727 bnx2x_acquire_phy_lock(bp); 1728 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link)); 1729 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, 1730 &bp->last_reported_link.link_report_flags); 1731 bnx2x_release_phy_lock(bp); 1732 1733 /* must be called before memory allocation and HW init */ 1734 bnx2x_ilt_set_info(bp); 1735 1736 /* 1737 * Zero fastpath structures preserving invariants like napi, which are 1738 * allocated only once, fp index, max_cos, bp pointer. 1739 * Also set fp->disable_tpa. 1740 */ 1741 for_each_queue(bp, i) 1742 bnx2x_bz_fp(bp, i); 1743 1744 1745 /* Set the receive queues buffer size */ 1746 bnx2x_set_rx_buf_size(bp); 1747 1748 if (bnx2x_alloc_mem(bp)) 1749 return -ENOMEM; 1750 1751 /* As long as bnx2x_alloc_mem() may possibly update 1752 * bp->num_queues, bnx2x_set_real_num_queues() should always 1753 * come after it. 1754 */ 1755 rc = bnx2x_set_real_num_queues(bp); 1756 if (rc) { 1757 BNX2X_ERR("Unable to set real_num_queues\n"); 1758 LOAD_ERROR_EXIT(bp, load_error0); 1759 } 1760 1761 /* configure multi cos mappings in kernel. 1762 * this configuration may be overriden by a multi class queue discipline 1763 * or by a dcbx negotiation result. 
1764 */ 1765 bnx2x_setup_tc(bp->dev, bp->max_cos); 1766 1767 bnx2x_napi_enable(bp); 1768 1769 /* Send LOAD_REQUEST command to MCP 1770 * Returns the type of LOAD command: 1771 * if it is the first port to be initialized 1772 * common blocks should be initialized, otherwise - not 1773 */ 1774 if (!BP_NOMCP(bp)) { 1775 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0); 1776 if (!load_code) { 1777 BNX2X_ERR("MCP response failure, aborting\n"); 1778 rc = -EBUSY; 1779 LOAD_ERROR_EXIT(bp, load_error1); 1780 } 1781 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) { 1782 rc = -EBUSY; /* other port in diagnostic mode */ 1783 LOAD_ERROR_EXIT(bp, load_error1); 1784 } 1785 1786 } else { 1787 int path = BP_PATH(bp); 1788 1789 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n", 1790 path, load_count[path][0], load_count[path][1], 1791 load_count[path][2]); 1792 load_count[path][0]++; 1793 load_count[path][1 + port]++; 1794 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n", 1795 path, load_count[path][0], load_count[path][1], 1796 load_count[path][2]); 1797 if (load_count[path][0] == 1) 1798 load_code = FW_MSG_CODE_DRV_LOAD_COMMON; 1799 else if (load_count[path][1 + port] == 1) 1800 load_code = FW_MSG_CODE_DRV_LOAD_PORT; 1801 else 1802 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION; 1803 } 1804 1805 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || 1806 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || 1807 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { 1808 bp->port.pmf = 1; 1809 /* 1810 * We need the barrier to ensure the ordering between the 1811 * writing to bp->port.pmf here and reading it from the 1812 * bnx2x_periodic_task(). 1813 */ 1814 smp_mb(); 1815 queue_delayed_work(bnx2x_wq, &bp->period_task, 0); 1816 } else 1817 bp->port.pmf = 0; 1818 1819 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); 1820 1821 /* Init Function state controlling object */ 1822 bnx2x__init_func_obj(bp); 1823 1824 /* Initialize HW */ 1825 rc = bnx2x_init_hw(bp, load_code); 1826 if (rc) { 1827 BNX2X_ERR("HW init failed, aborting\n"); 1828 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); 1829 LOAD_ERROR_EXIT(bp, load_error2); 1830 } 1831 1832 /* Connect to IRQs */ 1833 rc = bnx2x_setup_irqs(bp); 1834 if (rc) { 1835 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); 1836 LOAD_ERROR_EXIT(bp, load_error2); 1837 } 1838 1839 /* Setup NIC internals and enable interrupts */ 1840 bnx2x_nic_init(bp, load_code); 1841 1842 /* Init per-function objects */ 1843 bnx2x_init_bp_objs(bp); 1844 1845 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || 1846 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) && 1847 (bp->common.shmem2_base)) { 1848 if (SHMEM2_HAS(bp, dcc_support)) 1849 SHMEM2_WR(bp, dcc_support, 1850 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | 1851 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV)); 1852 } 1853 1854 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; 1855 rc = bnx2x_func_start(bp); 1856 if (rc) { 1857 BNX2X_ERR("Function start failed!\n"); 1858 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); 1859 LOAD_ERROR_EXIT(bp, load_error3); 1860 } 1861 1862 /* Send LOAD_DONE command to MCP */ 1863 if (!BP_NOMCP(bp)) { 1864 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); 1865 if (!load_code) { 1866 BNX2X_ERR("MCP response failure, aborting\n"); 1867 rc = -EBUSY; 1868 LOAD_ERROR_EXIT(bp, load_error3); 1869 } 1870 } 1871 1872 rc = bnx2x_setup_leading(bp); 1873 if (rc) { 1874 BNX2X_ERR("Setup leading failed!\n"); 1875 LOAD_ERROR_EXIT(bp, load_error3); 1876 } 1877 1878#ifdef BCM_CNIC 1879 /* Enable Timer scan */ 
1880 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1); 1881#endif 1882 1883 for_each_nondefault_queue(bp, i) { 1884 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); 1885 if (rc) 1886 LOAD_ERROR_EXIT(bp, load_error4); 1887 } 1888 1889 rc = bnx2x_init_rss_pf(bp); 1890 if (rc) 1891 LOAD_ERROR_EXIT(bp, load_error4); 1892 1893 /* Now when Clients are configured we are ready to work */ 1894 bp->state = BNX2X_STATE_OPEN; 1895 1896 /* Configure a ucast MAC */ 1897 rc = bnx2x_set_eth_mac(bp, true); 1898 if (rc) 1899 LOAD_ERROR_EXIT(bp, load_error4); 1900 1901 if (bp->pending_max) { 1902 bnx2x_update_max_mf_config(bp, bp->pending_max); 1903 bp->pending_max = 0; 1904 } 1905 1906 if (bp->port.pmf) 1907 bnx2x_initial_phy_init(bp, load_mode); 1908 1909 /* Start fast path */ 1910 1911 /* Initialize Rx filter. */ 1912 netif_addr_lock_bh(bp->dev); 1913 bnx2x_set_rx_mode(bp->dev); 1914 netif_addr_unlock_bh(bp->dev); 1915 1916 /* Start the Tx */ 1917 switch (load_mode) { 1918 case LOAD_NORMAL: 1919 /* Tx queue should be only reenabled */ 1920 netif_tx_wake_all_queues(bp->dev); 1921 break; 1922 1923 case LOAD_OPEN: 1924 netif_tx_start_all_queues(bp->dev); 1925 smp_mb__after_clear_bit(); 1926 break; 1927 1928 case LOAD_DIAG: 1929 bp->state = BNX2X_STATE_DIAG; 1930 break; 1931 1932 default: 1933 break; 1934 } 1935 1936 if (bp->port.pmf) 1937 bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0); 1938 else 1939 bnx2x__link_status_update(bp); 1940 1941 /* start the timer */ 1942 mod_timer(&bp->timer, jiffies + bp->current_interval); 1943 1944#ifdef BCM_CNIC 1945 /* re-read iscsi info */ 1946 bnx2x_get_iscsi_info(bp); 1947 bnx2x_setup_cnic_irq_info(bp); 1948 if (bp->state == BNX2X_STATE_OPEN) 1949 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); 1950#endif 1951 bnx2x_inc_load_cnt(bp); 1952 1953 /* Wait for all pending SP commands to complete */ 1954 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) { 1955 BNX2X_ERR("Timeout waiting for SP elements to complete\n"); 1956 bnx2x_nic_unload(bp, UNLOAD_CLOSE); 1957 return -EBUSY; 1958 } 1959 1960 bnx2x_dcbx_init(bp); 1961 return 0; 1962 1963#ifndef BNX2X_STOP_ON_ERROR 1964load_error4: 1965#ifdef BCM_CNIC 1966 /* Disable Timer scan */ 1967 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); 1968#endif 1969load_error3: 1970 bnx2x_int_disable_sync(bp, 1); 1971 1972 /* Clean queueable objects */ 1973 bnx2x_squeeze_objects(bp); 1974 1975 /* Free SKBs, SGEs, TPA pool and driver internals */ 1976 bnx2x_free_skbs(bp); 1977 for_each_rx_queue(bp, i) 1978 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 1979 1980 /* Release IRQs */ 1981 bnx2x_free_irq(bp); 1982load_error2: 1983 if (!BP_NOMCP(bp)) { 1984 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); 1985 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); 1986 } 1987 1988 bp->port.pmf = 0; 1989load_error1: 1990 bnx2x_napi_disable(bp); 1991load_error0: 1992 bnx2x_free_mem(bp); 1993 1994 return rc; 1995#endif /* ! BNX2X_STOP_ON_ERROR */ 1996} 1997 1998/* must be called with rtnl_lock */ 1999int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) 2000{ 2001 int i; 2002 bool global = false; 2003 2004 if ((bp->state == BNX2X_STATE_CLOSED) || 2005 (bp->state == BNX2X_STATE_ERROR)) { 2006 /* We can get here if the driver has been unloaded 2007 * during parity error recovery and is either waiting for a 2008 * leader to complete or for other functions to unload and 2009 * then ifdown has been issued. In this case we want to 2010 * unload and let other functions to complete a recovery 2011 * process. 
2012 */ 2013 bp->recovery_state = BNX2X_RECOVERY_DONE; 2014 bp->is_leader = 0; 2015 bnx2x_release_leader_lock(bp); 2016 smp_mb(); 2017 2018 DP(NETIF_MSG_HW, "Releasing a leadership...\n"); 2019 2020 return -EINVAL; 2021 } 2022 2023 /* 2024 * It's important to set the bp->state to the value different from 2025 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int() 2026 * may restart the Tx from the NAPI context (see bnx2x_tx_int()). 2027 */ 2028 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; 2029 smp_mb(); 2030 2031 /* Stop Tx */ 2032 bnx2x_tx_disable(bp); 2033 2034#ifdef BCM_CNIC 2035 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); 2036#endif 2037 2038 bp->rx_mode = BNX2X_RX_MODE_NONE; 2039 2040 del_timer_sync(&bp->timer); 2041 2042 /* Set ALWAYS_ALIVE bit in shmem */ 2043 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; 2044 2045 bnx2x_drv_pulse(bp); 2046 2047 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 2048 2049 /* Cleanup the chip if needed */ 2050 if (unload_mode != UNLOAD_RECOVERY) 2051 bnx2x_chip_cleanup(bp, unload_mode); 2052 else { 2053 /* Send the UNLOAD_REQUEST to the MCP */ 2054 bnx2x_send_unload_req(bp, unload_mode); 2055 2056 /* 2057 * Prevent transactions to host from the functions on the 2058 * engine that doesn't reset global blocks in case of global 2059 * attention once global blocks are reset and gates are opened 2060 * (the engine whose leader will perform the recovery 2061 * last). 2062 */ 2063 if (!CHIP_IS_E1x(bp)) 2064 bnx2x_pf_disable(bp); 2065 2066 /* Disable HW interrupts, NAPI */ 2067 bnx2x_netif_stop(bp, 1); 2068 2069 /* Release IRQs */ 2070 bnx2x_free_irq(bp); 2071 2072 /* Report UNLOAD_DONE to MCP */ 2073 bnx2x_send_unload_done(bp); 2074 } 2075 2076 /* 2077 * At this stage no more interrupts will arrive so we may safely clean 2078 * the queueable objects here in case they failed to get cleaned so far. 2079 */ 2080 bnx2x_squeeze_objects(bp); 2081 2082 /* There should be no more pending SP commands at this stage */ 2083 bp->sp_state = 0; 2084 2085 bp->port.pmf = 0; 2086 2087 /* Free SKBs, SGEs, TPA pool and driver internals */ 2088 bnx2x_free_skbs(bp); 2089 for_each_rx_queue(bp, i) 2090 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 2091 2092 bnx2x_free_mem(bp); 2093 2094 bp->state = BNX2X_STATE_CLOSED; 2095 2096 /* Check if there are pending parity attentions. If there are - set 2097 * RECOVERY_IN_PROGRESS. 2098 */ 2099 if (bnx2x_chk_parity_attn(bp, &global, false)) { 2100 bnx2x_set_reset_in_progress(bp); 2101 2102 /* Set RESET_IS_GLOBAL if needed */ 2103 if (global) 2104 bnx2x_set_reset_global(bp); 2105 } 2106 2107 2108 /* The last driver must disable a "close the gate" if there is no 2109 * parity attention or "process kill" pending. 2110 */ 2111 if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp))) 2112 bnx2x_disable_close_the_gate(bp); 2113 2114 return 0; 2115} 2116 2117int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) 2118{ 2119 u16 pmcsr; 2120 2121 /* If there is no power capability, silently succeed */ 2122 if (!bp->pm_cap) { 2123 DP(NETIF_MSG_HW, "No power capability. 
Breaking.\n"); 2124 return 0; 2125 } 2126 2127 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr); 2128 2129 switch (state) { 2130 case PCI_D0: 2131 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, 2132 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) | 2133 PCI_PM_CTRL_PME_STATUS)); 2134 2135 if (pmcsr & PCI_PM_CTRL_STATE_MASK) 2136 /* delay required during transition out of D3hot */ 2137 msleep(20); 2138 break; 2139 2140 case PCI_D3hot: 2141 /* If there are other clients above don't 2142 shut down the power */ 2143 if (atomic_read(&bp->pdev->enable_cnt) != 1) 2144 return 0; 2145 /* Don't shut down the power for emulation and FPGA */ 2146 if (CHIP_REV_IS_SLOW(bp)) 2147 return 0; 2148 2149 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 2150 pmcsr |= 3; 2151 2152 if (bp->wol) 2153 pmcsr |= PCI_PM_CTRL_PME_ENABLE; 2154 2155 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, 2156 pmcsr); 2157 2158 /* No more memory access after this point until 2159 * device is brought back to D0. 2160 */ 2161 break; 2162 2163 default: 2164 return -EINVAL; 2165 } 2166 return 0; 2167} 2168 2169/* 2170 * net_device service functions 2171 */ 2172int bnx2x_poll(struct napi_struct *napi, int budget) 2173{ 2174 int work_done = 0; 2175 u8 cos; 2176 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, 2177 napi); 2178 struct bnx2x *bp = fp->bp; 2179 2180 while (1) { 2181#ifdef BNX2X_STOP_ON_ERROR 2182 if (unlikely(bp->panic)) { 2183 napi_complete(napi); 2184 return 0; 2185 } 2186#endif 2187 2188 for_each_cos_in_tx_queue(fp, cos) 2189 if (bnx2x_tx_queue_has_work(&fp->txdata[cos])) 2190 bnx2x_tx_int(bp, &fp->txdata[cos]); 2191 2192 2193 if (bnx2x_has_rx_work(fp)) { 2194 work_done += bnx2x_rx_int(fp, budget - work_done); 2195 2196 /* must not complete if we consumed full budget */ 2197 if (work_done >= budget) 2198 break; 2199 } 2200 2201 /* Fall out from the NAPI loop if needed */ 2202 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { 2203#ifdef BCM_CNIC 2204 /* No need to update SB for FCoE L2 ring as long as 2205 * it's connected to the default SB and the SB 2206 * has been updated when NAPI was scheduled. 2207 */ 2208 if (IS_FCOE_FP(fp)) { 2209 napi_complete(napi); 2210 break; 2211 } 2212#endif 2213 2214 bnx2x_update_fpsb_idx(fp); 2215 /* bnx2x_has_rx_work() reads the status block, 2216 * thus we need to ensure that status block indices 2217 * have been actually read (bnx2x_update_fpsb_idx) 2218 * prior to this check (bnx2x_has_rx_work) so that 2219 * we won't write the "newer" value of the status block 2220 * to IGU (if there was a DMA right after 2221 * bnx2x_has_rx_work and if there is no rmb, the memory 2222 * reading (bnx2x_update_fpsb_idx) may be postponed 2223 * to right before bnx2x_ack_sb). In this case there 2224 * will never be another interrupt until there is 2225 * another update of the status block, while there 2226 * is still unhandled work. 
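 *
 * As a condensed, standalone model of this exit handshake (illustrative
 * only; do_work(), sb_snapshot(), has_work(), napi_done() and rearm_irq()
 * are hypothetical helpers, not driver or kernel APIs):
 *
 *	static void poll_model(int budget)
 *	{
 *		int done = 0;
 *
 *		for (;;) {
 *			done += do_work(budget - done);
 *			if (done >= budget)
 *				return;			// stay scheduled, no re-arm
 *			unsigned int idx = sb_snapshot();	// read SB indices first
 *			rmb();				// ...then re-check for work
 *			if (!has_work()) {
 *				napi_done();
 *				rearm_irq(idx);		// ack with the index read above
 *				return;
 *			}
 *		}
 *	}
 *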
2227 */ 2228 rmb(); 2229 2230 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { 2231 napi_complete(napi); 2232 /* Re-enable interrupts */ 2233 DP(NETIF_MSG_HW, 2234 "Update index to %d\n", fp->fp_hc_idx); 2235 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 2236 le16_to_cpu(fp->fp_hc_idx), 2237 IGU_INT_ENABLE, 1); 2238 break; 2239 } 2240 } 2241 } 2242 2243 return work_done; 2244} 2245 2246/* we split the first BD into headers and data BDs 2247 * to ease the pain of our fellow microcode engineers 2248 * we use one mapping for both BDs 2249 * So far this has only been observed to happen 2250 * in Other Operating Systems(TM) 2251 */ 2252static noinline u16 bnx2x_tx_split(struct bnx2x *bp, 2253 struct bnx2x_fp_txdata *txdata, 2254 struct sw_tx_bd *tx_buf, 2255 struct eth_tx_start_bd **tx_bd, u16 hlen, 2256 u16 bd_prod, int nbd) 2257{ 2258 struct eth_tx_start_bd *h_tx_bd = *tx_bd; 2259 struct eth_tx_bd *d_tx_bd; 2260 dma_addr_t mapping; 2261 int old_len = le16_to_cpu(h_tx_bd->nbytes); 2262 2263 /* first fix first BD */ 2264 h_tx_bd->nbd = cpu_to_le16(nbd); 2265 h_tx_bd->nbytes = cpu_to_le16(hlen); 2266 2267 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d " 2268 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi, 2269 h_tx_bd->addr_lo, h_tx_bd->nbd); 2270 2271 /* now get a new data BD 2272 * (after the pbd) and fill it */ 2273 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 2274 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; 2275 2276 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi), 2277 le32_to_cpu(h_tx_bd->addr_lo)) + hlen; 2278 2279 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 2280 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 2281 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen); 2282 2283 /* this marks the BD as one that has no individual mapping */ 2284 tx_buf->flags |= BNX2X_TSO_SPLIT_BD; 2285 2286 DP(NETIF_MSG_TX_QUEUED, 2287 "TSO split data size is %d (%x:%x)\n", 2288 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo); 2289 2290 /* update tx_bd */ 2291 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd; 2292 2293 return bd_prod; 2294} 2295 2296static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix) 2297{ 2298 if (fix > 0) 2299 csum = (u16) ~csum_fold(csum_sub(csum, 2300 csum_partial(t_header - fix, fix, 0))); 2301 2302 else if (fix < 0) 2303 csum = (u16) ~csum_fold(csum_add(csum, 2304 csum_partial(t_header, -fix, 0))); 2305 2306 return swab16(csum); 2307} 2308 2309static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) 2310{ 2311 u32 rc; 2312 2313 if (skb->ip_summed != CHECKSUM_PARTIAL) 2314 rc = XMIT_PLAIN; 2315 2316 else { 2317 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) { 2318 rc = XMIT_CSUM_V6; 2319 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 2320 rc |= XMIT_CSUM_TCP; 2321 2322 } else { 2323 rc = XMIT_CSUM_V4; 2324 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 2325 rc |= XMIT_CSUM_TCP; 2326 } 2327 } 2328 2329 if (skb_is_gso_v6(skb)) 2330 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6; 2331 else if (skb_is_gso(skb)) 2332 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP; 2333 2334 return rc; 2335} 2336 2337#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) 2338/* check if packet requires linearization (packet is too fragmented) 2339 no need to check fragmentation if page size > 8K (there will be no 2340 violation to FW restrictions) */ 2341static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, 2342 u32 xmit_type) 2343{ 2344 int to_copy = 0; 2345 int hlen = 0; 2346 int first_bd_sz = 0; 2347 2348 /* 3 = 1 (for linear data BD) + 2 (for PBD and 
last BD) */ 2349 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) { 2350 2351 if (xmit_type & XMIT_GSO) { 2352 unsigned short lso_mss = skb_shinfo(skb)->gso_size; 2353 /* Check if LSO packet needs to be copied: 2354 3 = 1 (for headers BD) + 2 (for PBD and last BD) */ 2355 int wnd_size = MAX_FETCH_BD - 3; 2356 /* Number of windows to check */ 2357 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size; 2358 int wnd_idx = 0; 2359 int frag_idx = 0; 2360 u32 wnd_sum = 0; 2361 2362 /* Headers length */ 2363 hlen = (int)(skb_transport_header(skb) - skb->data) + 2364 tcp_hdrlen(skb); 2365 2366 /* Amount of data (w/o headers) on linear part of SKB*/ 2367 first_bd_sz = skb_headlen(skb) - hlen; 2368 2369 wnd_sum = first_bd_sz; 2370 2371 /* Calculate the first sum - it's special */ 2372 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++) 2373 wnd_sum += 2374 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]); 2375 2376 /* If there was data on linear skb data - check it */ 2377 if (first_bd_sz > 0) { 2378 if (unlikely(wnd_sum < lso_mss)) { 2379 to_copy = 1; 2380 goto exit_lbl; 2381 } 2382 2383 wnd_sum -= first_bd_sz; 2384 } 2385 2386 /* Others are easier: run through the frag list and 2387 check all windows */ 2388 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) { 2389 wnd_sum += 2390 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]); 2391 2392 if (unlikely(wnd_sum < lso_mss)) { 2393 to_copy = 1; 2394 break; 2395 } 2396 wnd_sum -= 2397 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]); 2398 } 2399 } else { 2400 /* in non-LSO too fragmented packet should always 2401 be linearized */ 2402 to_copy = 1; 2403 } 2404 } 2405 2406exit_lbl: 2407 if (unlikely(to_copy)) 2408 DP(NETIF_MSG_TX_QUEUED, 2409 "Linearization IS REQUIRED for %s packet. " 2410 "num_frags %d hlen %d first_bd_sz %d\n", 2411 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO", 2412 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz); 2413 2414 return to_copy; 2415} 2416#endif 2417 2418static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data, 2419 u32 xmit_type) 2420{ 2421 *parsing_data |= (skb_shinfo(skb)->gso_size << 2422 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & 2423 ETH_TX_PARSE_BD_E2_LSO_MSS; 2424 if ((xmit_type & XMIT_GSO_V6) && 2425 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) 2426 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; 2427} 2428 2429/** 2430 * bnx2x_set_pbd_gso - update PBD in GSO case. 
2431 * 2432 * @skb: packet skb 2433 * @pbd: parse BD 2434 * @xmit_type: xmit flags 2435 */ 2436static inline void bnx2x_set_pbd_gso(struct sk_buff *skb, 2437 struct eth_tx_parse_bd_e1x *pbd, 2438 u32 xmit_type) 2439{ 2440 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); 2441 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq); 2442 pbd->tcp_flags = pbd_tcp_flags(skb); 2443 2444 if (xmit_type & XMIT_GSO_V4) { 2445 pbd->ip_id = swab16(ip_hdr(skb)->id); 2446 pbd->tcp_pseudo_csum = 2447 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr, 2448 ip_hdr(skb)->daddr, 2449 0, IPPROTO_TCP, 0)); 2450 2451 } else 2452 pbd->tcp_pseudo_csum = 2453 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 2454 &ipv6_hdr(skb)->daddr, 2455 0, IPPROTO_TCP, 0)); 2456 2457 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN; 2458} 2459 2460/** 2461 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length 2462 * 2463 * @bp: driver handle 2464 * @skb: packet skb 2465 * @parsing_data: data to be updated 2466 * @xmit_type: xmit flags 2467 * 2468 * 57712 related 2469 */ 2470static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, 2471 u32 *parsing_data, u32 xmit_type) 2472{ 2473 *parsing_data |= 2474 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) << 2475 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) & 2476 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W; 2477 2478 if (xmit_type & XMIT_CSUM_TCP) { 2479 *parsing_data |= ((tcp_hdrlen(skb) / 4) << 2480 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & 2481 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW; 2482 2483 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data; 2484 } else 2485 /* We support checksum offload for TCP and UDP only. 2486 * No need to pass the UDP header length - it's a constant. 2487 */ 2488 return skb_transport_header(skb) + 2489 sizeof(struct udphdr) - skb->data; 2490} 2491 2492static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, 2493 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type) 2494{ 2495 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; 2496 2497 if (xmit_type & XMIT_CSUM_V4) 2498 tx_start_bd->bd_flags.as_bitfield |= 2499 ETH_TX_BD_FLAGS_IP_CSUM; 2500 else 2501 tx_start_bd->bd_flags.as_bitfield |= 2502 ETH_TX_BD_FLAGS_IPV6; 2503 2504 if (!(xmit_type & XMIT_CSUM_TCP)) 2505 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP; 2506} 2507 2508/** 2509 * bnx2x_set_pbd_csum - update PBD with checksum and return header length 2510 * 2511 * @bp: driver handle 2512 * @skb: packet skb 2513 * @pbd: parse BD to be updated 2514 * @xmit_type: xmit flags 2515 */ 2516static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, 2517 struct eth_tx_parse_bd_e1x *pbd, 2518 u32 xmit_type) 2519{ 2520 u8 hlen = (skb_network_header(skb) - skb->data) >> 1; 2521 2522 /* for now NS flag is not used in Linux */ 2523 pbd->global_data = 2524 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << 2525 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); 2526 2527 pbd->ip_hlen_w = (skb_transport_header(skb) - 2528 skb_network_header(skb)) >> 1; 2529 2530 hlen += pbd->ip_hlen_w; 2531 2532 /* We support checksum offload for TCP and UDP only */ 2533 if (xmit_type & XMIT_CSUM_TCP) 2534 hlen += tcp_hdrlen(skb) / 2; 2535 else 2536 hlen += sizeof(struct udphdr) / 2; 2537 2538 pbd->total_hlen_w = cpu_to_le16(hlen); 2539 hlen = hlen*2; 2540 2541 if (xmit_type & XMIT_CSUM_TCP) { 2542 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check); 2543 2544 } else { 2545 s8 fix = SKB_CS_OFF(skb); /* signed! 
*/ 2546 2547 DP(NETIF_MSG_TX_QUEUED, 2548 "hlen %d fix %d csum before fix %x\n", 2549 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb)); 2550 2551 /* HW bug: fixup the CSUM */ 2552 pbd->tcp_pseudo_csum = 2553 bnx2x_csum_fix(skb_transport_header(skb), 2554 SKB_CS(skb), fix); 2555 2556 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n", 2557 pbd->tcp_pseudo_csum); 2558 } 2559 2560 return hlen; 2561} 2562 2563/* called with netif_tx_lock 2564 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call 2565 * netif_wake_queue() 2566 */ 2567netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) 2568{ 2569 struct bnx2x *bp = netdev_priv(dev); 2570 2571 struct bnx2x_fastpath *fp; 2572 struct netdev_queue *txq; 2573 struct bnx2x_fp_txdata *txdata; 2574 struct sw_tx_bd *tx_buf; 2575 struct eth_tx_start_bd *tx_start_bd, *first_bd; 2576 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; 2577 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; 2578 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; 2579 u32 pbd_e2_parsing_data = 0; 2580 u16 pkt_prod, bd_prod; 2581 int nbd, txq_index, fp_index, txdata_index; 2582 dma_addr_t mapping; 2583 u32 xmit_type = bnx2x_xmit_type(bp, skb); 2584 int i; 2585 u8 hlen = 0; 2586 __le16 pkt_size = 0; 2587 struct ethhdr *eth; 2588 u8 mac_type = UNICAST_ADDRESS; 2589 2590#ifdef BNX2X_STOP_ON_ERROR 2591 if (unlikely(bp->panic)) 2592 return NETDEV_TX_BUSY; 2593#endif 2594 2595 txq_index = skb_get_queue_mapping(skb); 2596 txq = netdev_get_tx_queue(dev, txq_index); 2597 2598 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT); 2599 2600 /* decode the fastpath index and the cos index from the txq */ 2601 fp_index = TXQ_TO_FP(txq_index); 2602 txdata_index = TXQ_TO_COS(txq_index); 2603 2604#ifdef BCM_CNIC 2605 /* 2606 * Override the above for the FCoE queue: 2607 * - FCoE fp entry is right after the ETH entries. 2608 * - FCoE L2 queue uses bp->txdata[0] only. 2609 */ 2610 if (unlikely(!NO_FCOE(bp) && (txq_index == 2611 bnx2x_fcoe_tx(bp, txq_index)))) { 2612 fp_index = FCOE_IDX; 2613 txdata_index = 0; 2614 } 2615#endif 2616 2617 /* enable this debug print to view the transmission queue being used 2618 DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d\n", 2619 txq_index, fp_index, txdata_index); */ 2620 2621 /* locate the fastpath and the txdata */ 2622 fp = &bp->fp[fp_index]; 2623 txdata = &fp->txdata[txdata_index]; 2624 2625 /* enable this debug print to view the tranmission details 2626 DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d" 2627 " tx_data ptr %p fp pointer %p\n", 2628 txdata->cid, fp_index, txdata_index, txdata, fp); */ 2629 2630 if (unlikely(bnx2x_tx_avail(bp, txdata) < 2631 (skb_shinfo(skb)->nr_frags + 3))) { 2632 fp->eth_q_stats.driver_xoff++; 2633 netif_tx_stop_queue(txq); 2634 BNX2X_ERR("BUG! 
Tx ring full when queue awake!\n"); 2635 return NETDEV_TX_BUSY; 2636 } 2637 2638 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x " 2639 "protocol(%x,%x) gso type %x xmit_type %x\n", 2640 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, 2641 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); 2642 2643 eth = (struct ethhdr *)skb->data; 2644 2645 /* set flag according to packet type (UNICAST_ADDRESS is default)*/ 2646 if (unlikely(is_multicast_ether_addr(eth->h_dest))) { 2647 if (is_broadcast_ether_addr(eth->h_dest)) 2648 mac_type = BROADCAST_ADDRESS; 2649 else 2650 mac_type = MULTICAST_ADDRESS; 2651 } 2652 2653#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) 2654 /* First, check if we need to linearize the skb (due to FW 2655 restrictions). No need to check fragmentation if page size > 8K 2656 (there will be no violation to FW restrictions) */ 2657 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { 2658 /* Statistics of linearization */ 2659 bp->lin_cnt++; 2660 if (skb_linearize(skb) != 0) { 2661 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - " 2662 "silently dropping this SKB\n"); 2663 dev_kfree_skb_any(skb); 2664 return NETDEV_TX_OK; 2665 } 2666 } 2667#endif 2668 /* Map skb linear data for DMA */ 2669 mapping = dma_map_single(&bp->pdev->dev, skb->data, 2670 skb_headlen(skb), DMA_TO_DEVICE); 2671 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { 2672 DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - " 2673 "silently dropping this SKB\n"); 2674 dev_kfree_skb_any(skb); 2675 return NETDEV_TX_OK; 2676 } 2677 /* 2678 Please read carefully. First we use one BD which we mark as start, 2679 then we have a parsing info BD (used for TSO or xsum), 2680 and only then we have the rest of the TSO BDs. 2681 (don't forget to mark the last one as last, 2682 and to unmap only AFTER you write to the BD ...) 2683 And above all, all pdb sizes are in words - NOT DWORDS! 
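 *
 * A worked example of that unit convention (illustrative only): an
 * untagged IPv4/TCP frame with no options carries 14 + 20 + 20 = 54
 * header bytes, so the parse BD's total_hlen_w is 54 / 2 = 27 sixteen-bit
 * words, while the BD chain itself starts at nbd = 2 (start BD + parse BD)
 * before any data/fragment BDs are counted. As a standalone helper
 * (hdrs_to_words() is a hypothetical name, not driver code):
 *
 *	static unsigned int hdrs_to_words(unsigned int eth_hlen,
 *					  unsigned int ip_hlen,
 *					  unsigned int l4_hlen)
 *	{
 *		return (eth_hlen + ip_hlen + l4_hlen) >> 1;	// bytes -> words
 *	}
 *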
2684 */ 2685 2686 /* get current pkt produced now - advance it just before sending packet 2687 * since mapping of pages may fail and cause packet to be dropped 2688 */ 2689 pkt_prod = txdata->tx_pkt_prod; 2690 bd_prod = TX_BD(txdata->tx_bd_prod); 2691 2692 /* get a tx_buf and first BD 2693 * tx_start_bd may be changed during SPLIT, 2694 * but first_bd will always stay first 2695 */ 2696 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)]; 2697 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd; 2698 first_bd = tx_start_bd; 2699 2700 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 2701 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE, 2702 mac_type); 2703 2704 /* header nbd */ 2705 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1); 2706 2707 /* remember the first BD of the packet */ 2708 tx_buf->first_bd = txdata->tx_bd_prod; 2709 tx_buf->skb = skb; 2710 tx_buf->flags = 0; 2711 2712 DP(NETIF_MSG_TX_QUEUED, 2713 "sending pkt %u @%p next_idx %u bd %u @%p\n", 2714 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd); 2715 2716 if (vlan_tx_tag_present(skb)) { 2717 tx_start_bd->vlan_or_ethertype = 2718 cpu_to_le16(vlan_tx_tag_get(skb)); 2719 tx_start_bd->bd_flags.as_bitfield |= 2720 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); 2721 } else 2722 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); 2723 2724 /* turn on parsing and get a BD */ 2725 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 2726 2727 if (xmit_type & XMIT_CSUM) 2728 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type); 2729 2730 if (!CHIP_IS_E1x(bp)) { 2731 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2; 2732 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); 2733 /* Set PBD in checksum offload case */ 2734 if (xmit_type & XMIT_CSUM) 2735 hlen = bnx2x_set_pbd_csum_e2(bp, skb, 2736 &pbd_e2_parsing_data, 2737 xmit_type); 2738 if (IS_MF_SI(bp)) { 2739 /* 2740 * fill in the MAC addresses in the PBD - for local 2741 * switching 2742 */ 2743 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi, 2744 &pbd_e2->src_mac_addr_mid, 2745 &pbd_e2->src_mac_addr_lo, 2746 eth->h_source); 2747 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi, 2748 &pbd_e2->dst_mac_addr_mid, 2749 &pbd_e2->dst_mac_addr_lo, 2750 eth->h_dest); 2751 } 2752 } else { 2753 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x; 2754 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); 2755 /* Set PBD in checksum offload case */ 2756 if (xmit_type & XMIT_CSUM) 2757 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type); 2758 2759 } 2760 2761 /* Setup the data pointer of the first BD of the packet */ 2762 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 2763 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 2764 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */ 2765 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); 2766 pkt_size = tx_start_bd->nbytes; 2767 2768 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d" 2769 " nbytes %d flags %x vlan %x\n", 2770 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo, 2771 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes), 2772 tx_start_bd->bd_flags.as_bitfield, 2773 le16_to_cpu(tx_start_bd->vlan_or_ethertype)); 2774 2775 if (xmit_type & XMIT_GSO) { 2776 2777 DP(NETIF_MSG_TX_QUEUED, 2778 "TSO packet len %d hlen %d total len %d tso size %d\n", 2779 skb->len, hlen, skb_headlen(skb), 2780 skb_shinfo(skb)->gso_size); 2781 2782 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; 2783 2784 if (unlikely(skb_headlen(skb) 
> hlen)) 2785 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf, 2786 &tx_start_bd, hlen, 2787 bd_prod, ++nbd); 2788 if (!CHIP_IS_E1x(bp)) 2789 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data, 2790 xmit_type); 2791 else 2792 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type); 2793 } 2794 2795 /* Set the PBD's parsing_data field if not zero 2796 * (for the chips newer than 57711). 2797 */ 2798 if (pbd_e2_parsing_data) 2799 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data); 2800 2801 tx_data_bd = (struct eth_tx_bd *)tx_start_bd; 2802 2803 /* Handle fragmented skb */ 2804 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2805 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2806 2807 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, 2808 skb_frag_size(frag), DMA_TO_DEVICE); 2809 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { 2810 unsigned int pkts_compl = 0, bytes_compl = 0; 2811 2812 DP(NETIF_MSG_TX_QUEUED, "Unable to map page - " 2813 "dropping packet...\n"); 2814 2815 /* we need to unmap all buffers already mapped 2816 * for this SKB; 2817 * first_bd->nbd needs to be properly updated 2818 * before the call to bnx2x_free_tx_pkt 2819 */ 2820 first_bd->nbd = cpu_to_le16(nbd); 2821 bnx2x_free_tx_pkt(bp, txdata, 2822 TX_BD(txdata->tx_pkt_prod), 2823 &pkts_compl, &bytes_compl); 2824 return NETDEV_TX_OK; 2825 } 2826 2827 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 2828 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; 2829 if (total_pkt_bd == NULL) 2830 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; 2831 2832 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 2833 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 2834 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag)); 2835 le16_add_cpu(&pkt_size, skb_frag_size(frag)); 2836 nbd++; 2837 2838 DP(NETIF_MSG_TX_QUEUED, 2839 "frag %d bd @%p addr (%x:%x) nbytes %d\n", 2840 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo, 2841 le16_to_cpu(tx_data_bd->nbytes)); 2842 } 2843 2844 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd); 2845 2846 /* update with actual num BDs */ 2847 first_bd->nbd = cpu_to_le16(nbd); 2848 2849 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 2850 2851 /* now send a tx doorbell, counting the next BD 2852 * if the packet contains or ends with it 2853 */ 2854 if (TX_BD_POFF(bd_prod) < nbd) 2855 nbd++; 2856 2857 /* total_pkt_bytes should be set on the first data BD if 2858 * it's not an LSO packet and there is more than one 2859 * data BD. In this case pkt_size is limited by an MTU value. 2860 * However we prefer to set it for an LSO packet (while we don't 2861 * have to) in order to save some CPU cycles in the non-LSO 2862 * case, which we care about much more. 
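 *
 * Concretely (standalone sketch; the names are hypothetical): pkt_size is
 * the linear part plus every mapped fragment, i.e. the whole frame length,
 * and that is what ends up in total_pkt_bytes of the first data BD:
 *
 *	static unsigned int pkt_total_bytes(unsigned int headlen,
 *					    const unsigned int *frag_len,
 *					    int nr_frags)
 *	{
 *		unsigned int bytes = headlen;	// linear data, first BD
 *		int i;
 *
 *		for (i = 0; i < nr_frags; i++)
 *			bytes += frag_len[i];	// one data BD per fragment
 *		return bytes;
 *	}
 *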
2863 */ 2864 if (total_pkt_bd != NULL) 2865 total_pkt_bd->total_pkt_bytes = pkt_size; 2866 2867 if (pbd_e1x) 2868 DP(NETIF_MSG_TX_QUEUED, 2869 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u" 2870 " tcp_flags %x xsum %x seq %u hlen %u\n", 2871 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w, 2872 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags, 2873 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq, 2874 le16_to_cpu(pbd_e1x->total_hlen_w)); 2875 if (pbd_e2) 2876 DP(NETIF_MSG_TX_QUEUED, 2877 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n", 2878 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid, 2879 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi, 2880 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo, 2881 pbd_e2->parsing_data); 2882 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); 2883 2884 netdev_tx_sent_queue(txq, skb->len); 2885 2886 txdata->tx_pkt_prod++; 2887 /* 2888 * Make sure that the BD data is updated before updating the producer 2889 * since FW might read the BD right after the producer is updated. 2890 * This is only applicable for weak-ordered memory model archs such 2891 * as IA-64. The following barrier is also mandatory since FW will 2892 * assumes packets must have BDs. 2893 */ 2894 wmb(); 2895 2896 txdata->tx_db.data.prod += nbd; 2897 barrier(); 2898 2899 DOORBELL(bp, txdata->cid, txdata->tx_db.raw); 2900 2901 mmiowb(); 2902 2903 txdata->tx_bd_prod += nbd; 2904 2905 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) { 2906 netif_tx_stop_queue(txq); 2907 2908 /* paired memory barrier is in bnx2x_tx_int(), we have to keep 2909 * ordering of set_bit() in netif_tx_stop_queue() and read of 2910 * fp->bd_tx_cons */ 2911 smp_mb(); 2912 2913 fp->eth_q_stats.driver_xoff++; 2914 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3) 2915 netif_tx_wake_queue(txq); 2916 } 2917 txdata->tx_pkt++; 2918 2919 return NETDEV_TX_OK; 2920} 2921 2922/** 2923 * bnx2x_setup_tc - routine to configure net_device for multi tc 2924 * 2925 * @netdev: net device to configure 2926 * @tc: number of traffic classes to enable 2927 * 2928 * callback connected to the ndo_setup_tc function pointer 2929 */ 2930int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) 2931{ 2932 int cos, prio, count, offset; 2933 struct bnx2x *bp = netdev_priv(dev); 2934 2935 /* setup tc must be called under rtnl lock */ 2936 ASSERT_RTNL(); 2937 2938 /* no traffic classes requested. aborting */ 2939 if (!num_tc) { 2940 netdev_reset_tc(dev); 2941 return 0; 2942 } 2943 2944 /* requested to support too many traffic classes */ 2945 if (num_tc > bp->max_cos) { 2946 DP(NETIF_MSG_TX_ERR, "support for too many traffic classes" 2947 " requested: %d. 
max supported is %d\n", 2948 num_tc, bp->max_cos); 2949 return -EINVAL; 2950 } 2951 2952 /* declare amount of supported traffic classes */ 2953 if (netdev_set_num_tc(dev, num_tc)) { 2954 DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes\n", 2955 num_tc); 2956 return -EINVAL; 2957 } 2958 2959 /* configure priority to traffic class mapping */ 2960 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) { 2961 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]); 2962 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 2963 prio, bp->prio_to_cos[prio]); 2964 } 2965 2966 2967 /* Use this configuration to diffrentiate tc0 from other COSes 2968 This can be used for ets or pfc, and save the effort of setting 2969 up a multio class queue disc or negotiating DCBX with a switch 2970 netdev_set_prio_tc_map(dev, 0, 0); 2971 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0); 2972 for (prio = 1; prio < 16; prio++) { 2973 netdev_set_prio_tc_map(dev, prio, 1); 2974 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1); 2975 } */ 2976 2977 /* configure traffic class to transmission queue mapping */ 2978 for (cos = 0; cos < bp->max_cos; cos++) { 2979 count = BNX2X_NUM_ETH_QUEUES(bp); 2980 offset = cos * MAX_TXQS_PER_COS; 2981 netdev_set_tc_queue(dev, cos, count, offset); 2982 DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d\n", 2983 cos, offset, count); 2984 } 2985 2986 return 0; 2987} 2988 2989/* called with rtnl_lock */ 2990int bnx2x_change_mac_addr(struct net_device *dev, void *p) 2991{ 2992 struct sockaddr *addr = p; 2993 struct bnx2x *bp = netdev_priv(dev); 2994 int rc = 0; 2995 2996 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) 2997 return -EINVAL; 2998 2999#ifdef BCM_CNIC 3000 if (IS_MF_ISCSI_SD(bp) && !is_zero_ether_addr(addr->sa_data)) 3001 return -EINVAL; 3002#endif 3003 3004 if (netif_running(dev)) { 3005 rc = bnx2x_set_eth_mac(bp, false); 3006 if (rc) 3007 return rc; 3008 } 3009 3010 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 3011 3012 if (netif_running(dev)) 3013 rc = bnx2x_set_eth_mac(bp, true); 3014 3015 return rc; 3016} 3017 3018static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index) 3019{ 3020 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk); 3021 struct bnx2x_fastpath *fp = &bp->fp[fp_index]; 3022 u8 cos; 3023 3024 /* Common */ 3025#ifdef BCM_CNIC 3026 if (IS_FCOE_IDX(fp_index)) { 3027 memset(sb, 0, sizeof(union host_hc_status_block)); 3028 fp->status_blk_mapping = 0; 3029 3030 } else { 3031#endif 3032 /* status blocks */ 3033 if (!CHIP_IS_E1x(bp)) 3034 BNX2X_PCI_FREE(sb->e2_sb, 3035 bnx2x_fp(bp, fp_index, 3036 status_blk_mapping), 3037 sizeof(struct host_hc_status_block_e2)); 3038 else 3039 BNX2X_PCI_FREE(sb->e1x_sb, 3040 bnx2x_fp(bp, fp_index, 3041 status_blk_mapping), 3042 sizeof(struct host_hc_status_block_e1x)); 3043#ifdef BCM_CNIC 3044 } 3045#endif 3046 /* Rx */ 3047 if (!skip_rx_queue(bp, fp_index)) { 3048 bnx2x_free_rx_bds(fp); 3049 3050 /* fastpath rx rings: rx_buf rx_desc rx_comp */ 3051 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring)); 3052 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring), 3053 bnx2x_fp(bp, fp_index, rx_desc_mapping), 3054 sizeof(struct eth_rx_bd) * NUM_RX_BD); 3055 3056 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring), 3057 bnx2x_fp(bp, fp_index, rx_comp_mapping), 3058 sizeof(struct eth_fast_path_rx_cqe) * 3059 NUM_RCQ_BD); 3060 3061 /* SGE ring */ 3062 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring)); 3063 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring), 3064 bnx2x_fp(bp, fp_index, 
rx_sge_mapping), 3065 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); 3066 } 3067 3068 /* Tx */ 3069 if (!skip_tx_queue(bp, fp_index)) { 3070 /* fastpath tx rings: tx_buf tx_desc */ 3071 for_each_cos_in_tx_queue(fp, cos) { 3072 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; 3073 3074 DP(BNX2X_MSG_SP, 3075 "freeing tx memory of fp %d cos %d cid %d\n", 3076 fp_index, cos, txdata->cid); 3077 3078 BNX2X_FREE(txdata->tx_buf_ring); 3079 BNX2X_PCI_FREE(txdata->tx_desc_ring, 3080 txdata->tx_desc_mapping, 3081 sizeof(union eth_tx_bd_types) * NUM_TX_BD); 3082 } 3083 } 3084 /* end of fastpath */ 3085} 3086 3087void bnx2x_free_fp_mem(struct bnx2x *bp) 3088{ 3089 int i; 3090 for_each_queue(bp, i) 3091 bnx2x_free_fp_mem_at(bp, i); 3092} 3093 3094static inline void set_sb_shortcuts(struct bnx2x *bp, int index) 3095{ 3096 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk); 3097 if (!CHIP_IS_E1x(bp)) { 3098 bnx2x_fp(bp, index, sb_index_values) = 3099 (__le16 *)status_blk.e2_sb->sb.index_values; 3100 bnx2x_fp(bp, index, sb_running_index) = 3101 (__le16 *)status_blk.e2_sb->sb.running_index; 3102 } else { 3103 bnx2x_fp(bp, index, sb_index_values) = 3104 (__le16 *)status_blk.e1x_sb->sb.index_values; 3105 bnx2x_fp(bp, index, sb_running_index) = 3106 (__le16 *)status_blk.e1x_sb->sb.running_index; 3107 } 3108} 3109 3110static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) 3111{ 3112 union host_hc_status_block *sb; 3113 struct bnx2x_fastpath *fp = &bp->fp[index]; 3114 int ring_size = 0; 3115 u8 cos; 3116 int rx_ring_size = 0; 3117 3118#ifdef BCM_CNIC 3119 if (IS_MF_ISCSI_SD(bp)) { 3120 rx_ring_size = MIN_RX_SIZE_NONTPA; 3121 bp->rx_ring_size = rx_ring_size; 3122 } else 3123#endif 3124 if (!bp->rx_ring_size) { 3125 3126 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); 3127 3128 /* allocate at least number of buffers required by FW */ 3129 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA : 3130 MIN_RX_SIZE_TPA, rx_ring_size); 3131 3132 bp->rx_ring_size = rx_ring_size; 3133 } else /* if rx_ring_size specified - use it */ 3134 rx_ring_size = bp->rx_ring_size; 3135 3136 /* Common */ 3137 sb = &bnx2x_fp(bp, index, status_blk); 3138#ifdef BCM_CNIC 3139 if (!IS_FCOE_IDX(index)) { 3140#endif 3141 /* status blocks */ 3142 if (!CHIP_IS_E1x(bp)) 3143 BNX2X_PCI_ALLOC(sb->e2_sb, 3144 &bnx2x_fp(bp, index, status_blk_mapping), 3145 sizeof(struct host_hc_status_block_e2)); 3146 else 3147 BNX2X_PCI_ALLOC(sb->e1x_sb, 3148 &bnx2x_fp(bp, index, status_blk_mapping), 3149 sizeof(struct host_hc_status_block_e1x)); 3150#ifdef BCM_CNIC 3151 } 3152#endif 3153 3154 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to 3155 * set shortcuts for it. 
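 *
 * (The "shortcuts" filled in by set_sb_shortcuts() above are simply cached
 * pointers into whichever status-block layout the chip uses, so the hot
 * path never re-selects the layout per packet. A standalone sketch of the
 * idea, with hypothetical types rather than the driver's structures:
 *
 *	struct sb_regular { unsigned short index_values[8]; };
 *	struct sb_extended { unsigned short index_values[16]; };
 *
 *	struct fp_model {
 *		unsigned short *sb_index_values;	// shortcut pointer
 *	};
 *
 *	static void set_shortcut(struct fp_model *fp, void *sb, int extended)
 *	{
 *		fp->sb_index_values = extended ?
 *			((struct sb_extended *)sb)->index_values :
 *			((struct sb_regular *)sb)->index_values;
 *	}
 *
 * After this, the poll path reads fp->sb_index_values[i] directly.)
 *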
3156 */ 3157 if (!IS_FCOE_IDX(index)) 3158 set_sb_shortcuts(bp, index); 3159 3160 /* Tx */ 3161 if (!skip_tx_queue(bp, index)) { 3162 /* fastpath tx rings: tx_buf tx_desc */ 3163 for_each_cos_in_tx_queue(fp, cos) { 3164 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; 3165 3166 DP(BNX2X_MSG_SP, "allocating tx memory of " 3167 "fp %d cos %d\n", 3168 index, cos); 3169 3170 BNX2X_ALLOC(txdata->tx_buf_ring, 3171 sizeof(struct sw_tx_bd) * NUM_TX_BD); 3172 BNX2X_PCI_ALLOC(txdata->tx_desc_ring, 3173 &txdata->tx_desc_mapping, 3174 sizeof(union eth_tx_bd_types) * NUM_TX_BD); 3175 } 3176 } 3177 3178 /* Rx */ 3179 if (!skip_rx_queue(bp, index)) { 3180 /* fastpath rx rings: rx_buf rx_desc rx_comp */ 3181 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring), 3182 sizeof(struct sw_rx_bd) * NUM_RX_BD); 3183 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring), 3184 &bnx2x_fp(bp, index, rx_desc_mapping), 3185 sizeof(struct eth_rx_bd) * NUM_RX_BD); 3186 3187 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring), 3188 &bnx2x_fp(bp, index, rx_comp_mapping), 3189 sizeof(struct eth_fast_path_rx_cqe) * 3190 NUM_RCQ_BD); 3191 3192 /* SGE ring */ 3193 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring), 3194 sizeof(struct sw_rx_page) * NUM_RX_SGE); 3195 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring), 3196 &bnx2x_fp(bp, index, rx_sge_mapping), 3197 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); 3198 /* RX BD ring */ 3199 bnx2x_set_next_page_rx_bd(fp); 3200 3201 /* CQ ring */ 3202 bnx2x_set_next_page_rx_cq(fp); 3203 3204 /* BDs */ 3205 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size); 3206 if (ring_size < rx_ring_size) 3207 goto alloc_mem_err; 3208 } 3209 3210 return 0; 3211 3212/* handles low memory cases */ 3213alloc_mem_err: 3214 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n", 3215 index, ring_size); 3216 /* FW will drop all packets if queue is not big enough, 3217 * In these cases we disable the queue 3218 * Min size is different for OOO, TPA and non-TPA queues 3219 */ 3220 if (ring_size < (fp->disable_tpa ? 3221 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) { 3222 /* release memory allocated for this queue */ 3223 bnx2x_free_fp_mem_at(bp, index); 3224 return -ENOMEM; 3225 } 3226 return 0; 3227} 3228 3229int bnx2x_alloc_fp_mem(struct bnx2x *bp) 3230{ 3231 int i; 3232 3233 /** 3234 * 1. Allocate FP for leading - fatal if error 3235 * 2. {CNIC} Allocate FCoE FP - fatal if error 3236 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error 3237 * 4. 
Allocate RSS - fix number of queues if error 3238 */ 3239 3240 /* leading */ 3241 if (bnx2x_alloc_fp_mem_at(bp, 0)) 3242 return -ENOMEM; 3243 3244#ifdef BCM_CNIC 3245 if (!NO_FCOE(bp)) 3246 /* FCoE */ 3247 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX)) 3248 /* we will fail load process instead of mark 3249 * NO_FCOE_FLAG 3250 */ 3251 return -ENOMEM; 3252#endif 3253 3254 /* RSS */ 3255 for_each_nondefault_eth_queue(bp, i) 3256 if (bnx2x_alloc_fp_mem_at(bp, i)) 3257 break; 3258 3259 /* handle memory failures */ 3260 if (i != BNX2X_NUM_ETH_QUEUES(bp)) { 3261 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i; 3262 3263 WARN_ON(delta < 0); 3264#ifdef BCM_CNIC 3265 /** 3266 * move non eth FPs next to last eth FP 3267 * must be done in that order 3268 * FCOE_IDX < FWD_IDX < OOO_IDX 3269 */ 3270 3271 /* move FCoE fp even NO_FCOE_FLAG is on */ 3272 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta); 3273#endif 3274 bp->num_queues -= delta; 3275 BNX2X_ERR("Adjusted num of queues from %d to %d\n", 3276 bp->num_queues + delta, bp->num_queues); 3277 } 3278 3279 return 0; 3280} 3281 3282void bnx2x_free_mem_bp(struct bnx2x *bp) 3283{ 3284 kfree(bp->fp); 3285 kfree(bp->msix_table); 3286 kfree(bp->ilt); 3287} 3288 3289int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp) 3290{ 3291 struct bnx2x_fastpath *fp; 3292 struct msix_entry *tbl; 3293 struct bnx2x_ilt *ilt; 3294 int msix_table_size = 0; 3295 3296 /* 3297 * The biggest MSI-X table we might need is as a maximum number of fast 3298 * path IGU SBs plus default SB (for PF). 3299 */ 3300 msix_table_size = bp->igu_sb_cnt + 1; 3301 3302 /* fp array: RSS plus CNIC related L2 queues */ 3303 fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) * 3304 sizeof(*fp), GFP_KERNEL); 3305 if (!fp) 3306 goto alloc_err; 3307 bp->fp = fp; 3308 3309 /* msix table */ 3310 tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL); 3311 if (!tbl) 3312 goto alloc_err; 3313 bp->msix_table = tbl; 3314 3315 /* ilt */ 3316 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL); 3317 if (!ilt) 3318 goto alloc_err; 3319 bp->ilt = ilt; 3320 3321 return 0; 3322alloc_err: 3323 bnx2x_free_mem_bp(bp); 3324 return -ENOMEM; 3325 3326} 3327 3328int bnx2x_reload_if_running(struct net_device *dev) 3329{ 3330 struct bnx2x *bp = netdev_priv(dev); 3331 3332 if (unlikely(!netif_running(dev))) 3333 return 0; 3334 3335 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 3336 return bnx2x_nic_load(bp, LOAD_NORMAL); 3337} 3338 3339int bnx2x_get_cur_phy_idx(struct bnx2x *bp) 3340{ 3341 u32 sel_phy_idx = 0; 3342 if (bp->link_params.num_phys <= 1) 3343 return INT_PHY; 3344 3345 if (bp->link_vars.link_up) { 3346 sel_phy_idx = EXT_PHY1; 3347 /* In case link is SERDES, check if the EXT_PHY2 is the one */ 3348 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) && 3349 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE)) 3350 sel_phy_idx = EXT_PHY2; 3351 } else { 3352 3353 switch (bnx2x_phy_selection(&bp->link_params)) { 3354 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: 3355 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: 3356 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: 3357 sel_phy_idx = EXT_PHY1; 3358 break; 3359 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: 3360 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: 3361 sel_phy_idx = EXT_PHY2; 3362 break; 3363 } 3364 } 3365 3366 return sel_phy_idx; 3367 3368} 3369int bnx2x_get_link_cfg_idx(struct bnx2x *bp) 3370{ 3371 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp); 3372 /* 3373 * The selected actived PHY is always after swapping (in case PHY 3374 * swapping is enabled). 
So when swapping is enabled, we need to reverse 3375 * the configuration 3376 */ 3377 3378 if (bp->link_params.multi_phy_config & 3379 PORT_HW_CFG_PHY_SWAPPED_ENABLED) { 3380 if (sel_phy_idx == EXT_PHY1) 3381 sel_phy_idx = EXT_PHY2; 3382 else if (sel_phy_idx == EXT_PHY2) 3383 sel_phy_idx = EXT_PHY1; 3384 } 3385 return LINK_CONFIG_IDX(sel_phy_idx); 3386} 3387 3388#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC) 3389int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type) 3390{ 3391 struct bnx2x *bp = netdev_priv(dev); 3392 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 3393 3394 switch (type) { 3395 case NETDEV_FCOE_WWNN: 3396 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi, 3397 cp->fcoe_wwn_node_name_lo); 3398 break; 3399 case NETDEV_FCOE_WWPN: 3400 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi, 3401 cp->fcoe_wwn_port_name_lo); 3402 break; 3403 default: 3404 return -EINVAL; 3405 } 3406 3407 return 0; 3408} 3409#endif 3410 3411/* called with rtnl_lock */ 3412int bnx2x_change_mtu(struct net_device *dev, int new_mtu) 3413{ 3414 struct bnx2x *bp = netdev_priv(dev); 3415 3416 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { 3417 pr_err("Handling parity error recovery. Try again later\n"); 3418 return -EAGAIN; 3419 } 3420 3421 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) || 3422 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) 3423 return -EINVAL; 3424 3425 /* This does not race with packet allocation 3426 * because the actual alloc size is 3427 * only updated as part of load 3428 */ 3429 dev->mtu = new_mtu; 3430 3431 return bnx2x_reload_if_running(dev); 3432} 3433 3434netdev_features_t bnx2x_fix_features(struct net_device *dev, 3435 netdev_features_t features) 3436{ 3437 struct bnx2x *bp = netdev_priv(dev); 3438 3439 /* TPA requires Rx CSUM offloading */ 3440 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) 3441 features &= ~NETIF_F_LRO; 3442 3443 return features; 3444} 3445 3446int bnx2x_set_features(struct net_device *dev, netdev_features_t features) 3447{ 3448 struct bnx2x *bp = netdev_priv(dev); 3449 u32 flags = bp->flags; 3450 bool bnx2x_reload = false; 3451 3452 if (features & NETIF_F_LRO) 3453 flags |= TPA_ENABLE_FLAG; 3454 else 3455 flags &= ~TPA_ENABLE_FLAG; 3456 3457 if (features & NETIF_F_LOOPBACK) { 3458 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) { 3459 bp->link_params.loopback_mode = LOOPBACK_BMAC; 3460 bnx2x_reload = true; 3461 } 3462 } else { 3463 if (bp->link_params.loopback_mode != LOOPBACK_NONE) { 3464 bp->link_params.loopback_mode = LOOPBACK_NONE; 3465 bnx2x_reload = true; 3466 } 3467 } 3468 3469 if (flags ^ bp->flags) { 3470 bp->flags = flags; 3471 bnx2x_reload = true; 3472 } 3473 3474 if (bnx2x_reload) { 3475 if (bp->recovery_state == BNX2X_RECOVERY_DONE) 3476 return bnx2x_reload_if_running(dev); 3477 /* else: bnx2x_nic_load() will be called at end of recovery */ 3478 } 3479 3480 return 0; 3481} 3482 3483void bnx2x_tx_timeout(struct net_device *dev) 3484{ 3485 struct bnx2x *bp = netdev_priv(dev); 3486 3487#ifdef BNX2X_STOP_ON_ERROR 3488 if (!bp->panic) 3489 bnx2x_panic(); 3490#endif 3491 3492 smp_mb__before_clear_bit(); 3493 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state); 3494 smp_mb__after_clear_bit(); 3495 3496 /* This allows the netif to be shutdown gracefully before resetting */ 3497 schedule_delayed_work(&bp->sp_rtnl_task, 0); 3498} 3499 3500int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) 3501{ 3502 struct net_device *dev = pci_get_drvdata(pdev); 3503 struct bnx2x *bp; 3504 3505 if (!dev) { 3506 dev_err(&pdev->dev, "BAD net device from 
bnx2x_init_one\n"); 3507 return -ENODEV; 3508 } 3509 bp = netdev_priv(dev); 3510 3511 rtnl_lock(); 3512 3513 pci_save_state(pdev); 3514 3515 if (!netif_running(dev)) { 3516 rtnl_unlock(); 3517 return 0; 3518 } 3519 3520 netif_device_detach(dev); 3521 3522 bnx2x_nic_unload(bp, UNLOAD_CLOSE); 3523 3524 bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); 3525 3526 rtnl_unlock(); 3527 3528 return 0; 3529} 3530 3531int bnx2x_resume(struct pci_dev *pdev) 3532{ 3533 struct net_device *dev = pci_get_drvdata(pdev); 3534 struct bnx2x *bp; 3535 int rc; 3536 3537 if (!dev) { 3538 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n"); 3539 return -ENODEV; 3540 } 3541 bp = netdev_priv(dev); 3542 3543 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { 3544 pr_err("Handling parity error recovery. Try again later\n"); 3545 return -EAGAIN; 3546 } 3547 3548 rtnl_lock(); 3549 3550 pci_restore_state(pdev); 3551 3552 if (!netif_running(dev)) { 3553 rtnl_unlock(); 3554 return 0; 3555 } 3556 3557 bnx2x_set_power_state(bp, PCI_D0); 3558 netif_device_attach(dev); 3559 3560 /* Since the chip was reset, clear the FW sequence number */ 3561 bp->fw_seq = 0; 3562 rc = bnx2x_nic_load(bp, LOAD_OPEN); 3563 3564 rtnl_unlock(); 3565 3566 return rc; 3567} 3568 3569 3570void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, 3571 u32 cid) 3572{ 3573 /* ustorm cxt validation */ 3574 cxt->ustorm_ag_context.cdu_usage = 3575 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), 3576 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE); 3577 /* xcontext validation */ 3578 cxt->xstorm_ag_context.cdu_reserved = 3579 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), 3580 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE); 3581} 3582 3583static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port, 3584 u8 fw_sb_id, u8 sb_index, 3585 u8 ticks) 3586{ 3587 3588 u32 addr = BAR_CSTRORM_INTMEM + 3589 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index); 3590 REG_WR8(bp, addr, ticks); 3591 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n", 3592 port, fw_sb_id, sb_index, ticks); 3593} 3594 3595static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port, 3596 u16 fw_sb_id, u8 sb_index, 3597 u8 disable) 3598{ 3599 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); 3600 u32 addr = BAR_CSTRORM_INTMEM + 3601 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index); 3602 u16 flags = REG_RD16(bp, addr); 3603 /* clear and set */ 3604 flags &= ~HC_INDEX_DATA_HC_ENABLED; 3605 flags |= enable_flag; 3606 REG_WR16(bp, addr, flags); 3607 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n", 3608 port, fw_sb_id, sb_index, disable); 3609} 3610 3611void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id, 3612 u8 sb_index, u8 disable, u16 usec) 3613{ 3614 int port = BP_PORT(bp); 3615 u8 ticks = usec / BNX2X_BTR; 3616 3617 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks); 3618 3619 disable = disable ? 1 : (usec ? 0 : 1); 3620 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable); 3621} 3622
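/*
 * Illustrative footnote, not part of the driver: bnx2x_update_coalesce_sb_index()
 * above converts a microsecond interval into host-coalescing timer ticks and
 * treats a zero interval as "disable this index". A standalone sketch of that
 * rule (BTR_USEC is an assumed stand-in for BNX2X_BTR, and the names are
 * hypothetical):
 *
 *	#define BTR_USEC 4	// assumed tick granularity, example only
 *
 *	struct hc_cfg { unsigned char ticks; unsigned char disabled; };
 *
 *	static struct hc_cfg hc_coalesce(unsigned int usec, int force_disable)
 *	{
 *		struct hc_cfg cfg;
 *
 *		cfg.ticks = usec / BTR_USEC;
 *		cfg.disabled = force_disable ? 1 : (usec ? 0 : 1);
 *		return cfg;
 *	}
 *
 * Under that assumption, usec = 64 would program 16 ticks with the index
 * enabled, while usec = 0 keeps the index programmed but marks it disabled.
 */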