bcmgenet.c revision b2cde2cc71f2382e4a4bfaaacd5263bd93f1e0d2
1/* 2 * Broadcom GENET (Gigabit Ethernet) controller driver 3 * 4 * Copyright (c) 2014 Broadcom Corporation 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program; if not, write to the Free Software 17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 18 */ 19 20#define pr_fmt(fmt) "bcmgenet: " fmt 21 22#include <linux/kernel.h> 23#include <linux/module.h> 24#include <linux/sched.h> 25#include <linux/types.h> 26#include <linux/fcntl.h> 27#include <linux/interrupt.h> 28#include <linux/string.h> 29#include <linux/if_ether.h> 30#include <linux/init.h> 31#include <linux/errno.h> 32#include <linux/delay.h> 33#include <linux/platform_device.h> 34#include <linux/dma-mapping.h> 35#include <linux/pm.h> 36#include <linux/clk.h> 37#include <linux/version.h> 38#include <linux/of.h> 39#include <linux/of_address.h> 40#include <linux/of_irq.h> 41#include <linux/of_net.h> 42#include <linux/of_platform.h> 43#include <net/arp.h> 44 45#include <linux/mii.h> 46#include <linux/ethtool.h> 47#include <linux/netdevice.h> 48#include <linux/inetdevice.h> 49#include <linux/etherdevice.h> 50#include <linux/skbuff.h> 51#include <linux/in.h> 52#include <linux/ip.h> 53#include <linux/ipv6.h> 54#include <linux/phy.h> 55 56#include <asm/unaligned.h> 57 58#include "bcmgenet.h" 59 60/* Maximum number of hardware queues, downsized if needed */ 61#define GENET_MAX_MQ_CNT 4 62 63/* Default highest priority queue for multi queue support */ 64#define GENET_Q0_PRIORITY 0 65 66#define GENET_DEFAULT_BD_CNT \ 67 (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->bds_cnt) 68 69#define RX_BUF_LENGTH 2048 70#define SKB_ALIGNMENT 32 71 72/* Tx/Rx DMA register offset, skip 256 descriptors */ 73#define WORDS_PER_BD(p) (p->hw_params->words_per_bd) 74#define DMA_DESC_SIZE (WORDS_PER_BD(priv) * sizeof(u32)) 75 76#define GENET_TDMA_REG_OFF (priv->hw_params->tdma_offset + \ 77 TOTAL_DESC * DMA_DESC_SIZE) 78 79#define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \ 80 TOTAL_DESC * DMA_DESC_SIZE) 81 82static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv, 83 void __iomem *d, u32 value) 84{ 85 __raw_writel(value, d + DMA_DESC_LENGTH_STATUS); 86} 87 88static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv, 89 void __iomem *d) 90{ 91 return __raw_readl(d + DMA_DESC_LENGTH_STATUS); 92} 93 94static inline void dmadesc_set_addr(struct bcmgenet_priv *priv, 95 void __iomem *d, 96 dma_addr_t addr) 97{ 98 __raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO); 99 100 /* Register writes to GISB bus can take couple hundred nanoseconds 101 * and are done for each packet, save these expensive writes unless 102 * the platform is explicitely configured for 64-bits/LPAE. 103 */ 104#ifdef CONFIG_PHYS_ADDR_T_64BIT 105 if (priv->hw_params->flags & GENET_HAS_40BITS) 106 __raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI); 107#endif 108} 109 110/* Combined address + length/status setter */ 111static inline void dmadesc_set(struct bcmgenet_priv *priv, 112 void __iomem *d, dma_addr_t addr, u32 val) 113{ 114 dmadesc_set_length_status(priv, d, val); 115 dmadesc_set_addr(priv, d, addr); 116} 117 118static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv, 119 void __iomem *d) 120{ 121 dma_addr_t addr; 122 123 addr = __raw_readl(d + DMA_DESC_ADDRESS_LO); 124 125 /* Register writes to GISB bus can take couple hundred nanoseconds 126 * and are done for each packet, save these expensive writes unless 127 * the platform is explicitely configured for 64-bits/LPAE. 128 */ 129#ifdef CONFIG_PHYS_ADDR_T_64BIT 130 if (priv->hw_params->flags & GENET_HAS_40BITS) 131 addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32; 132#endif 133 return addr; 134} 135 136#define GENET_VER_FMT "%1d.%1d EPHY: 0x%04x" 137 138#define GENET_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ 139 NETIF_MSG_LINK) 140 141static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv) 142{ 143 if (GENET_IS_V1(priv)) 144 return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1); 145 else 146 return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL); 147} 148 149static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) 150{ 151 if (GENET_IS_V1(priv)) 152 bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1); 153 else 154 bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL); 155} 156 157/* These macros are defined to deal with register map change 158 * between GENET1.1 and GENET2. Only those currently being used 159 * by driver are defined. 160 */ 161static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv) 162{ 163 if (GENET_IS_V1(priv)) 164 return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1); 165 else 166 return __raw_readl(priv->base + 167 priv->hw_params->tbuf_offset + TBUF_CTRL); 168} 169 170static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) 171{ 172 if (GENET_IS_V1(priv)) 173 bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1); 174 else 175 __raw_writel(val, priv->base + 176 priv->hw_params->tbuf_offset + TBUF_CTRL); 177} 178 179static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv) 180{ 181 if (GENET_IS_V1(priv)) 182 return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1); 183 else 184 return __raw_readl(priv->base + 185 priv->hw_params->tbuf_offset + TBUF_BP_MC); 186} 187 188static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val) 189{ 190 if (GENET_IS_V1(priv)) 191 bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1); 192 else 193 __raw_writel(val, priv->base + 194 priv->hw_params->tbuf_offset + TBUF_BP_MC); 195} 196 197/* RX/TX DMA register accessors */ 198enum dma_reg { 199 DMA_RING_CFG = 0, 200 DMA_CTRL, 201 DMA_STATUS, 202 DMA_SCB_BURST_SIZE, 203 DMA_ARB_CTRL, 204 DMA_PRIORITY, 205 DMA_RING_PRIORITY, 206}; 207 208static const u8 bcmgenet_dma_regs_v3plus[] = { 209 [DMA_RING_CFG] = 0x00, 210 [DMA_CTRL] = 0x04, 211 [DMA_STATUS] = 0x08, 212 [DMA_SCB_BURST_SIZE] = 0x0C, 213 [DMA_ARB_CTRL] = 0x2C, 214 [DMA_PRIORITY] = 0x30, 215 [DMA_RING_PRIORITY] = 0x38, 216}; 217 218static const u8 bcmgenet_dma_regs_v2[] = { 219 [DMA_RING_CFG] = 0x00, 220 [DMA_CTRL] = 0x04, 221 [DMA_STATUS] = 0x08, 222 [DMA_SCB_BURST_SIZE] = 0x0C, 223 [DMA_ARB_CTRL] = 0x30, 224 [DMA_PRIORITY] = 0x34, 225 [DMA_RING_PRIORITY] = 0x3C, 226}; 227 228static const u8 bcmgenet_dma_regs_v1[] = { 229 [DMA_CTRL] = 0x00, 230 [DMA_STATUS] = 0x04, 231 [DMA_SCB_BURST_SIZE] = 0x0C, 232 [DMA_ARB_CTRL] = 0x30, 233 [DMA_PRIORITY] = 0x34, 234 [DMA_RING_PRIORITY] = 0x3C, 235}; 236 237/* Set at runtime once bcmgenet version is known */ 238static const u8 *bcmgenet_dma_regs; 239 240static inline struct bcmgenet_priv *dev_to_priv(struct device *dev) 241{ 242 return netdev_priv(dev_get_drvdata(dev)); 243} 244 245static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv, 246 enum dma_reg r) 247{ 248 return __raw_readl(priv->base + GENET_TDMA_REG_OFF + 249 DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); 250} 251 252static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv, 253 u32 val, enum dma_reg r) 254{ 255 __raw_writel(val, priv->base + GENET_TDMA_REG_OFF + 256 DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); 257} 258 259static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv, 260 enum dma_reg r) 261{ 262 return __raw_readl(priv->base + GENET_RDMA_REG_OFF + 263 DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); 264} 265 266static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv, 267 u32 val, enum dma_reg r) 268{ 269 __raw_writel(val, priv->base + GENET_RDMA_REG_OFF + 270 DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); 271} 272 273/* RDMA/TDMA ring registers and accessors 274 * we merge the common fields and just prefix with T/D the registers 275 * having different meaning depending on the direction 276 */ 277enum dma_ring_reg { 278 TDMA_READ_PTR = 0, 279 RDMA_WRITE_PTR = TDMA_READ_PTR, 280 TDMA_READ_PTR_HI, 281 RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI, 282 TDMA_CONS_INDEX, 283 RDMA_PROD_INDEX = TDMA_CONS_INDEX, 284 TDMA_PROD_INDEX, 285 RDMA_CONS_INDEX = TDMA_PROD_INDEX, 286 DMA_RING_BUF_SIZE, 287 DMA_START_ADDR, 288 DMA_START_ADDR_HI, 289 DMA_END_ADDR, 290 DMA_END_ADDR_HI, 291 DMA_MBUF_DONE_THRESH, 292 TDMA_FLOW_PERIOD, 293 RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD, 294 TDMA_WRITE_PTR, 295 RDMA_READ_PTR = TDMA_WRITE_PTR, 296 TDMA_WRITE_PTR_HI, 297 RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI 298}; 299 300/* GENET v4 supports 40-bits pointer addressing 301 * for obvious reasons the LO and HI word parts 302 * are contiguous, but this offsets the other 303 * registers. 304 */ 305static const u8 genet_dma_ring_regs_v4[] = { 306 [TDMA_READ_PTR] = 0x00, 307 [TDMA_READ_PTR_HI] = 0x04, 308 [TDMA_CONS_INDEX] = 0x08, 309 [TDMA_PROD_INDEX] = 0x0C, 310 [DMA_RING_BUF_SIZE] = 0x10, 311 [DMA_START_ADDR] = 0x14, 312 [DMA_START_ADDR_HI] = 0x18, 313 [DMA_END_ADDR] = 0x1C, 314 [DMA_END_ADDR_HI] = 0x20, 315 [DMA_MBUF_DONE_THRESH] = 0x24, 316 [TDMA_FLOW_PERIOD] = 0x28, 317 [TDMA_WRITE_PTR] = 0x2C, 318 [TDMA_WRITE_PTR_HI] = 0x30, 319}; 320 321static const u8 genet_dma_ring_regs_v123[] = { 322 [TDMA_READ_PTR] = 0x00, 323 [TDMA_CONS_INDEX] = 0x04, 324 [TDMA_PROD_INDEX] = 0x08, 325 [DMA_RING_BUF_SIZE] = 0x0C, 326 [DMA_START_ADDR] = 0x10, 327 [DMA_END_ADDR] = 0x14, 328 [DMA_MBUF_DONE_THRESH] = 0x18, 329 [TDMA_FLOW_PERIOD] = 0x1C, 330 [TDMA_WRITE_PTR] = 0x20, 331}; 332 333/* Set at runtime once GENET version is known */ 334static const u8 *genet_dma_ring_regs; 335 336static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv, 337 unsigned int ring, 338 enum dma_ring_reg r) 339{ 340 return __raw_readl(priv->base + GENET_TDMA_REG_OFF + 341 (DMA_RING_SIZE * ring) + 342 genet_dma_ring_regs[r]); 343} 344 345static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv, 346 unsigned int ring, 347 u32 val, 348 enum dma_ring_reg r) 349{ 350 __raw_writel(val, priv->base + GENET_TDMA_REG_OFF + 351 (DMA_RING_SIZE * ring) + 352 genet_dma_ring_regs[r]); 353} 354 355static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv, 356 unsigned int ring, 357 enum dma_ring_reg r) 358{ 359 return __raw_readl(priv->base + GENET_RDMA_REG_OFF + 360 (DMA_RING_SIZE * ring) + 361 genet_dma_ring_regs[r]); 362} 363 364static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv, 365 unsigned int ring, 366 u32 val, 367 enum dma_ring_reg r) 368{ 369 __raw_writel(val, priv->base + GENET_RDMA_REG_OFF + 370 (DMA_RING_SIZE * ring) + 371 genet_dma_ring_regs[r]); 372} 373 374static int bcmgenet_get_settings(struct net_device *dev, 375 struct ethtool_cmd *cmd) 376{ 377 struct bcmgenet_priv *priv = netdev_priv(dev); 378 379 if (!netif_running(dev)) 380 return -EINVAL; 381 382 if (!priv->phydev) 383 return -ENODEV; 384 385 return phy_ethtool_gset(priv->phydev, cmd); 386} 387 388static int bcmgenet_set_settings(struct net_device *dev, 389 struct ethtool_cmd *cmd) 390{ 391 struct bcmgenet_priv *priv = netdev_priv(dev); 392 393 if (!netif_running(dev)) 394 return -EINVAL; 395 396 if (!priv->phydev) 397 return -ENODEV; 398 399 return phy_ethtool_sset(priv->phydev, cmd); 400} 401 402static int bcmgenet_set_rx_csum(struct net_device *dev, 403 netdev_features_t wanted) 404{ 405 struct bcmgenet_priv *priv = netdev_priv(dev); 406 u32 rbuf_chk_ctrl; 407 bool rx_csum_en; 408 409 rx_csum_en = !!(wanted & NETIF_F_RXCSUM); 410 411 rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL); 412 413 /* enable rx checksumming */ 414 if (rx_csum_en) 415 rbuf_chk_ctrl |= RBUF_RXCHK_EN; 416 else 417 rbuf_chk_ctrl &= ~RBUF_RXCHK_EN; 418 priv->desc_rxchk_en = rx_csum_en; 419 bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL); 420 421 return 0; 422} 423 424static int bcmgenet_set_tx_csum(struct net_device *dev, 425 netdev_features_t wanted) 426{ 427 struct bcmgenet_priv *priv = netdev_priv(dev); 428 bool desc_64b_en; 429 u32 tbuf_ctrl, rbuf_ctrl; 430 431 tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv); 432 rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL); 433 434 desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); 435 436 /* enable 64 bytes descriptor in both directions (RBUF and TBUF) */ 437 if (desc_64b_en) { 438 tbuf_ctrl |= RBUF_64B_EN; 439 rbuf_ctrl |= RBUF_64B_EN; 440 } else { 441 tbuf_ctrl &= ~RBUF_64B_EN; 442 rbuf_ctrl &= ~RBUF_64B_EN; 443 } 444 priv->desc_64b_en = desc_64b_en; 445 446 bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl); 447 bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL); 448 449 return 0; 450} 451 452static int bcmgenet_set_features(struct net_device *dev, 453 netdev_features_t features) 454{ 455 netdev_features_t changed = features ^ dev->features; 456 netdev_features_t wanted = dev->wanted_features; 457 int ret = 0; 458 459 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) 460 ret = bcmgenet_set_tx_csum(dev, wanted); 461 if (changed & (NETIF_F_RXCSUM)) 462 ret = bcmgenet_set_rx_csum(dev, wanted); 463 464 return ret; 465} 466 467static u32 bcmgenet_get_msglevel(struct net_device *dev) 468{ 469 struct bcmgenet_priv *priv = netdev_priv(dev); 470 471 return priv->msg_enable; 472} 473 474static void bcmgenet_set_msglevel(struct net_device *dev, u32 level) 475{ 476 struct bcmgenet_priv *priv = netdev_priv(dev); 477 478 priv->msg_enable = level; 479} 480 481/* standard ethtool support functions. */ 482enum bcmgenet_stat_type { 483 BCMGENET_STAT_NETDEV = -1, 484 BCMGENET_STAT_MIB_RX, 485 BCMGENET_STAT_MIB_TX, 486 BCMGENET_STAT_RUNT, 487 BCMGENET_STAT_MISC, 488}; 489 490struct bcmgenet_stats { 491 char stat_string[ETH_GSTRING_LEN]; 492 int stat_sizeof; 493 int stat_offset; 494 enum bcmgenet_stat_type type; 495 /* reg offset from UMAC base for misc counters */ 496 u16 reg_offset; 497}; 498 499#define STAT_NETDEV(m) { \ 500 .stat_string = __stringify(m), \ 501 .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \ 502 .stat_offset = offsetof(struct net_device_stats, m), \ 503 .type = BCMGENET_STAT_NETDEV, \ 504} 505 506#define STAT_GENET_MIB(str, m, _type) { \ 507 .stat_string = str, \ 508 .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \ 509 .stat_offset = offsetof(struct bcmgenet_priv, m), \ 510 .type = _type, \ 511} 512 513#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX) 514#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX) 515#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT) 516 517#define STAT_GENET_MISC(str, m, offset) { \ 518 .stat_string = str, \ 519 .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \ 520 .stat_offset = offsetof(struct bcmgenet_priv, m), \ 521 .type = BCMGENET_STAT_MISC, \ 522 .reg_offset = offset, \ 523} 524 525 526/* There is a 0xC gap between the end of RX and beginning of TX stats and then 527 * between the end of TX stats and the beginning of the RX RUNT 528 */ 529#define BCMGENET_STAT_OFFSET 0xc 530 531/* Hardware counters must be kept in sync because the order/offset 532 * is important here (order in structure declaration = order in hardware) 533 */ 534static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = { 535 /* general stats */ 536 STAT_NETDEV(rx_packets), 537 STAT_NETDEV(tx_packets), 538 STAT_NETDEV(rx_bytes), 539 STAT_NETDEV(tx_bytes), 540 STAT_NETDEV(rx_errors), 541 STAT_NETDEV(tx_errors), 542 STAT_NETDEV(rx_dropped), 543 STAT_NETDEV(tx_dropped), 544 STAT_NETDEV(multicast), 545 /* UniMAC RSV counters */ 546 STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64), 547 STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127), 548 STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255), 549 STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511), 550 STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023), 551 STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518), 552 STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv), 553 STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047), 554 STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095), 555 STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216), 556 STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt), 557 STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes), 558 STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca), 559 STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca), 560 STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs), 561 STAT_GENET_MIB_RX("rx_control", mib.rx.cf), 562 STAT_GENET_MIB_RX("rx_pause", mib.rx.pf), 563 STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo), 564 STAT_GENET_MIB_RX("rx_align", mib.rx.aln), 565 STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr), 566 STAT_GENET_MIB_RX("rx_code", mib.rx.cde), 567 STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr), 568 STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr), 569 STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr), 570 STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue), 571 STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok), 572 STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc), 573 STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp), 574 STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc), 575 /* UniMAC TSV counters */ 576 STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64), 577 STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127), 578 STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255), 579 STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511), 580 STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023), 581 STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518), 582 STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv), 583 STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047), 584 STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095), 585 STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216), 586 STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts), 587 STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca), 588 STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca), 589 STAT_GENET_MIB_TX("tx_pause", mib.tx.pf), 590 STAT_GENET_MIB_TX("tx_control", mib.tx.cf), 591 STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs), 592 STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr), 593 STAT_GENET_MIB_TX("tx_defer", mib.tx.drf), 594 STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf), 595 STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl), 596 STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl), 597 STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl), 598 STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl), 599 STAT_GENET_MIB_TX("tx_frags", mib.tx.frg), 600 STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl), 601 STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr), 602 STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes), 603 STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok), 604 STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc), 605 /* UniMAC RUNT counters */ 606 STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt), 607 STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs), 608 STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align), 609 STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes), 610 /* Misc UniMAC counters */ 611 STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, 612 UMAC_RBUF_OVFL_CNT), 613 STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT), 614 STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT), 615}; 616 617#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) 618 619static void bcmgenet_get_drvinfo(struct net_device *dev, 620 struct ethtool_drvinfo *info) 621{ 622 strlcpy(info->driver, "bcmgenet", sizeof(info->driver)); 623 strlcpy(info->version, "v2.0", sizeof(info->version)); 624 info->n_stats = BCMGENET_STATS_LEN; 625 626} 627 628static int bcmgenet_get_sset_count(struct net_device *dev, int string_set) 629{ 630 switch (string_set) { 631 case ETH_SS_STATS: 632 return BCMGENET_STATS_LEN; 633 default: 634 return -EOPNOTSUPP; 635 } 636} 637 638static void bcmgenet_get_strings(struct net_device *dev, 639 u32 stringset, u8 *data) 640{ 641 int i; 642 643 switch (stringset) { 644 case ETH_SS_STATS: 645 for (i = 0; i < BCMGENET_STATS_LEN; i++) { 646 memcpy(data + i * ETH_GSTRING_LEN, 647 bcmgenet_gstrings_stats[i].stat_string, 648 ETH_GSTRING_LEN); 649 } 650 break; 651 } 652} 653 654static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv) 655{ 656 int i, j = 0; 657 658 for (i = 0; i < BCMGENET_STATS_LEN; i++) { 659 const struct bcmgenet_stats *s; 660 u8 offset = 0; 661 u32 val = 0; 662 char *p; 663 664 s = &bcmgenet_gstrings_stats[i]; 665 switch (s->type) { 666 case BCMGENET_STAT_NETDEV: 667 continue; 668 case BCMGENET_STAT_MIB_RX: 669 case BCMGENET_STAT_MIB_TX: 670 case BCMGENET_STAT_RUNT: 671 if (s->type != BCMGENET_STAT_MIB_RX) 672 offset = BCMGENET_STAT_OFFSET; 673 val = bcmgenet_umac_readl(priv, UMAC_MIB_START + 674 j + offset); 675 break; 676 case BCMGENET_STAT_MISC: 677 val = bcmgenet_umac_readl(priv, s->reg_offset); 678 /* clear if overflowed */ 679 if (val == ~0) 680 bcmgenet_umac_writel(priv, 0, s->reg_offset); 681 break; 682 } 683 684 j += s->stat_sizeof; 685 p = (char *)priv + s->stat_offset; 686 *(u32 *)p = val; 687 } 688} 689 690static void bcmgenet_get_ethtool_stats(struct net_device *dev, 691 struct ethtool_stats *stats, 692 u64 *data) 693{ 694 struct bcmgenet_priv *priv = netdev_priv(dev); 695 int i; 696 697 if (netif_running(dev)) 698 bcmgenet_update_mib_counters(priv); 699 700 for (i = 0; i < BCMGENET_STATS_LEN; i++) { 701 const struct bcmgenet_stats *s; 702 char *p; 703 704 s = &bcmgenet_gstrings_stats[i]; 705 if (s->type == BCMGENET_STAT_NETDEV) 706 p = (char *)&dev->stats; 707 else 708 p = (char *)priv; 709 p += s->stat_offset; 710 data[i] = *(u32 *)p; 711 } 712} 713 714/* standard ethtool support functions. */ 715static struct ethtool_ops bcmgenet_ethtool_ops = { 716 .get_strings = bcmgenet_get_strings, 717 .get_sset_count = bcmgenet_get_sset_count, 718 .get_ethtool_stats = bcmgenet_get_ethtool_stats, 719 .get_settings = bcmgenet_get_settings, 720 .set_settings = bcmgenet_set_settings, 721 .get_drvinfo = bcmgenet_get_drvinfo, 722 .get_link = ethtool_op_get_link, 723 .get_msglevel = bcmgenet_get_msglevel, 724 .set_msglevel = bcmgenet_set_msglevel, 725}; 726 727/* Power down the unimac, based on mode. */ 728static void bcmgenet_power_down(struct bcmgenet_priv *priv, 729 enum bcmgenet_power_mode mode) 730{ 731 u32 reg; 732 733 switch (mode) { 734 case GENET_POWER_CABLE_SENSE: 735 phy_detach(priv->phydev); 736 break; 737 738 case GENET_POWER_PASSIVE: 739 /* Power down LED */ 740 bcmgenet_mii_reset(priv->dev); 741 if (priv->hw_params->flags & GENET_HAS_EXT) { 742 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); 743 reg |= (EXT_PWR_DOWN_PHY | 744 EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS); 745 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); 746 } 747 break; 748 default: 749 break; 750 } 751} 752 753static void bcmgenet_power_up(struct bcmgenet_priv *priv, 754 enum bcmgenet_power_mode mode) 755{ 756 u32 reg; 757 758 if (!(priv->hw_params->flags & GENET_HAS_EXT)) 759 return; 760 761 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); 762 763 switch (mode) { 764 case GENET_POWER_PASSIVE: 765 reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY | 766 EXT_PWR_DOWN_BIAS); 767 /* fallthrough */ 768 case GENET_POWER_CABLE_SENSE: 769 /* enable APD */ 770 reg |= EXT_PWR_DN_EN_LD; 771 break; 772 default: 773 break; 774 } 775 776 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); 777 bcmgenet_mii_reset(priv->dev); 778} 779 780/* ioctl handle special commands that are not present in ethtool. */ 781static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 782{ 783 struct bcmgenet_priv *priv = netdev_priv(dev); 784 int val = 0; 785 786 if (!netif_running(dev)) 787 return -EINVAL; 788 789 switch (cmd) { 790 case SIOCGMIIPHY: 791 case SIOCGMIIREG: 792 case SIOCSMIIREG: 793 if (!priv->phydev) 794 val = -ENODEV; 795 else 796 val = phy_mii_ioctl(priv->phydev, rq, cmd); 797 break; 798 799 default: 800 val = -EINVAL; 801 break; 802 } 803 804 return val; 805} 806 807static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv, 808 struct bcmgenet_tx_ring *ring) 809{ 810 struct enet_cb *tx_cb_ptr; 811 812 tx_cb_ptr = ring->cbs; 813 tx_cb_ptr += ring->write_ptr - ring->cb_ptr; 814 tx_cb_ptr->bd_addr = priv->tx_bds + ring->write_ptr * DMA_DESC_SIZE; 815 /* Advancing local write pointer */ 816 if (ring->write_ptr == ring->end_ptr) 817 ring->write_ptr = ring->cb_ptr; 818 else 819 ring->write_ptr++; 820 821 return tx_cb_ptr; 822} 823 824/* Simple helper to free a control block's resources */ 825static void bcmgenet_free_cb(struct enet_cb *cb) 826{ 827 dev_kfree_skb_any(cb->skb); 828 cb->skb = NULL; 829 dma_unmap_addr_set(cb, dma_addr, 0); 830} 831 832static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_priv *priv, 833 struct bcmgenet_tx_ring *ring) 834{ 835 bcmgenet_intrl2_0_writel(priv, 836 UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE, 837 INTRL2_CPU_MASK_SET); 838} 839 840static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv, 841 struct bcmgenet_tx_ring *ring) 842{ 843 bcmgenet_intrl2_0_writel(priv, 844 UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE, 845 INTRL2_CPU_MASK_CLEAR); 846} 847 848static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv, 849 struct bcmgenet_tx_ring *ring) 850{ 851 bcmgenet_intrl2_1_writel(priv, 852 (1 << ring->index), INTRL2_CPU_MASK_CLEAR); 853 priv->int1_mask &= ~(1 << ring->index); 854} 855 856static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv, 857 struct bcmgenet_tx_ring *ring) 858{ 859 bcmgenet_intrl2_1_writel(priv, 860 (1 << ring->index), INTRL2_CPU_MASK_SET); 861 priv->int1_mask |= (1 << ring->index); 862} 863 864/* Unlocked version of the reclaim routine */ 865static void __bcmgenet_tx_reclaim(struct net_device *dev, 866 struct bcmgenet_tx_ring *ring) 867{ 868 struct bcmgenet_priv *priv = netdev_priv(dev); 869 int last_tx_cn, last_c_index, num_tx_bds; 870 struct enet_cb *tx_cb_ptr; 871 struct netdev_queue *txq; 872 unsigned int c_index; 873 874 /* Compute how many buffers are transmited since last xmit call */ 875 c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX); 876 txq = netdev_get_tx_queue(dev, ring->queue); 877 878 last_c_index = ring->c_index; 879 num_tx_bds = ring->size; 880 881 c_index &= (num_tx_bds - 1); 882 883 if (c_index >= last_c_index) 884 last_tx_cn = c_index - last_c_index; 885 else 886 last_tx_cn = num_tx_bds - last_c_index + c_index; 887 888 netif_dbg(priv, tx_done, dev, 889 "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n", 890 __func__, ring->index, 891 c_index, last_tx_cn, last_c_index); 892 893 /* Reclaim transmitted buffers */ 894 while (last_tx_cn-- > 0) { 895 tx_cb_ptr = ring->cbs + last_c_index; 896 if (tx_cb_ptr->skb) { 897 dev->stats.tx_bytes += tx_cb_ptr->skb->len; 898 dma_unmap_single(&dev->dev, 899 dma_unmap_addr(tx_cb_ptr, dma_addr), 900 tx_cb_ptr->skb->len, 901 DMA_TO_DEVICE); 902 bcmgenet_free_cb(tx_cb_ptr); 903 } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) { 904 dev->stats.tx_bytes += 905 dma_unmap_len(tx_cb_ptr, dma_len); 906 dma_unmap_page(&dev->dev, 907 dma_unmap_addr(tx_cb_ptr, dma_addr), 908 dma_unmap_len(tx_cb_ptr, dma_len), 909 DMA_TO_DEVICE); 910 dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0); 911 } 912 dev->stats.tx_packets++; 913 ring->free_bds += 1; 914 915 last_c_index++; 916 last_c_index &= (num_tx_bds - 1); 917 } 918 919 if (ring->free_bds > (MAX_SKB_FRAGS + 1)) 920 ring->int_disable(priv, ring); 921 922 if (netif_tx_queue_stopped(txq)) 923 netif_tx_wake_queue(txq); 924 925 ring->c_index = c_index; 926} 927 928static void bcmgenet_tx_reclaim(struct net_device *dev, 929 struct bcmgenet_tx_ring *ring) 930{ 931 unsigned long flags; 932 933 spin_lock_irqsave(&ring->lock, flags); 934 __bcmgenet_tx_reclaim(dev, ring); 935 spin_unlock_irqrestore(&ring->lock, flags); 936} 937 938static void bcmgenet_tx_reclaim_all(struct net_device *dev) 939{ 940 struct bcmgenet_priv *priv = netdev_priv(dev); 941 int i; 942 943 if (netif_is_multiqueue(dev)) { 944 for (i = 0; i < priv->hw_params->tx_queues; i++) 945 bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]); 946 } 947 948 bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]); 949} 950 951/* Transmits a single SKB (either head of a fragment or a single SKB) 952 * caller must hold priv->lock 953 */ 954static int bcmgenet_xmit_single(struct net_device *dev, 955 struct sk_buff *skb, 956 u16 dma_desc_flags, 957 struct bcmgenet_tx_ring *ring) 958{ 959 struct bcmgenet_priv *priv = netdev_priv(dev); 960 struct device *kdev = &priv->pdev->dev; 961 struct enet_cb *tx_cb_ptr; 962 unsigned int skb_len; 963 dma_addr_t mapping; 964 u32 length_status; 965 int ret; 966 967 tx_cb_ptr = bcmgenet_get_txcb(priv, ring); 968 969 if (unlikely(!tx_cb_ptr)) 970 BUG(); 971 972 tx_cb_ptr->skb = skb; 973 974 skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb); 975 976 mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); 977 ret = dma_mapping_error(kdev, mapping); 978 if (ret) { 979 netif_err(priv, tx_err, dev, "Tx DMA map failed\n"); 980 dev_kfree_skb(skb); 981 return ret; 982 } 983 984 dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); 985 dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len); 986 length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags | 987 (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) | 988 DMA_TX_APPEND_CRC; 989 990 if (skb->ip_summed == CHECKSUM_PARTIAL) 991 length_status |= DMA_TX_DO_CSUM; 992 993 dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status); 994 995 /* Decrement total BD count and advance our write pointer */ 996 ring->free_bds -= 1; 997 ring->prod_index += 1; 998 ring->prod_index &= DMA_P_INDEX_MASK; 999 1000 return 0; 1001} 1002 1003/* Transmit a SKB fragement */ 1004static int bcmgenet_xmit_frag(struct net_device *dev, 1005 skb_frag_t *frag, 1006 u16 dma_desc_flags, 1007 struct bcmgenet_tx_ring *ring) 1008{ 1009 struct bcmgenet_priv *priv = netdev_priv(dev); 1010 struct device *kdev = &priv->pdev->dev; 1011 struct enet_cb *tx_cb_ptr; 1012 dma_addr_t mapping; 1013 int ret; 1014 1015 tx_cb_ptr = bcmgenet_get_txcb(priv, ring); 1016 1017 if (unlikely(!tx_cb_ptr)) 1018 BUG(); 1019 tx_cb_ptr->skb = NULL; 1020 1021 mapping = skb_frag_dma_map(kdev, frag, 0, 1022 skb_frag_size(frag), DMA_TO_DEVICE); 1023 ret = dma_mapping_error(kdev, mapping); 1024 if (ret) { 1025 netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n", 1026 __func__); 1027 return ret; 1028 } 1029 1030 dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); 1031 dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size); 1032 1033 dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, 1034 (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags | 1035 (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT)); 1036 1037 1038 ring->free_bds -= 1; 1039 ring->prod_index += 1; 1040 ring->prod_index &= DMA_P_INDEX_MASK; 1041 1042 return 0; 1043} 1044 1045/* Reallocate the SKB to put enough headroom in front of it and insert 1046 * the transmit checksum offsets in the descriptors 1047 */ 1048static int bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *skb) 1049{ 1050 struct status_64 *status = NULL; 1051 struct sk_buff *new_skb; 1052 u16 offset; 1053 u8 ip_proto; 1054 u16 ip_ver; 1055 u32 tx_csum_info; 1056 1057 if (unlikely(skb_headroom(skb) < sizeof(*status))) { 1058 /* If 64 byte status block enabled, must make sure skb has 1059 * enough headroom for us to insert 64B status block. 1060 */ 1061 new_skb = skb_realloc_headroom(skb, sizeof(*status)); 1062 dev_kfree_skb(skb); 1063 if (!new_skb) { 1064 dev->stats.tx_errors++; 1065 dev->stats.tx_dropped++; 1066 return -ENOMEM; 1067 } 1068 skb = new_skb; 1069 } 1070 1071 skb_push(skb, sizeof(*status)); 1072 status = (struct status_64 *)skb->data; 1073 1074 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1075 ip_ver = htons(skb->protocol); 1076 switch (ip_ver) { 1077 case ETH_P_IP: 1078 ip_proto = ip_hdr(skb)->protocol; 1079 break; 1080 case ETH_P_IPV6: 1081 ip_proto = ipv6_hdr(skb)->nexthdr; 1082 break; 1083 default: 1084 return 0; 1085 } 1086 1087 offset = skb_checksum_start_offset(skb) - sizeof(*status); 1088 tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) | 1089 (offset + skb->csum_offset); 1090 1091 /* Set the length valid bit for TCP and UDP and just set 1092 * the special UDP flag for IPv4, else just set to 0. 1093 */ 1094 if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) { 1095 tx_csum_info |= STATUS_TX_CSUM_LV; 1096 if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP) 1097 tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP; 1098 } else 1099 tx_csum_info = 0; 1100 1101 status->tx_csum_info = tx_csum_info; 1102 } 1103 1104 return 0; 1105} 1106 1107static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) 1108{ 1109 struct bcmgenet_priv *priv = netdev_priv(dev); 1110 struct bcmgenet_tx_ring *ring = NULL; 1111 struct netdev_queue *txq; 1112 unsigned long flags = 0; 1113 int nr_frags, index; 1114 u16 dma_desc_flags; 1115 int ret; 1116 int i; 1117 1118 index = skb_get_queue_mapping(skb); 1119 /* Mapping strategy: 1120 * queue_mapping = 0, unclassified, packet xmited through ring16 1121 * queue_mapping = 1, goes to ring 0. (highest priority queue 1122 * queue_mapping = 2, goes to ring 1. 1123 * queue_mapping = 3, goes to ring 2. 1124 * queue_mapping = 4, goes to ring 3. 1125 */ 1126 if (index == 0) 1127 index = DESC_INDEX; 1128 else 1129 index -= 1; 1130 1131 nr_frags = skb_shinfo(skb)->nr_frags; 1132 ring = &priv->tx_rings[index]; 1133 txq = netdev_get_tx_queue(dev, ring->queue); 1134 1135 spin_lock_irqsave(&ring->lock, flags); 1136 if (ring->free_bds <= nr_frags + 1) { 1137 netif_tx_stop_queue(txq); 1138 netdev_err(dev, "%s: tx ring %d full when queue %d awake\n", 1139 __func__, index, ring->queue); 1140 ret = NETDEV_TX_BUSY; 1141 goto out; 1142 } 1143 1144 /* set the SKB transmit checksum */ 1145 if (priv->desc_64b_en) { 1146 ret = bcmgenet_put_tx_csum(dev, skb); 1147 if (ret) { 1148 ret = NETDEV_TX_OK; 1149 goto out; 1150 } 1151 } 1152 1153 dma_desc_flags = DMA_SOP; 1154 if (nr_frags == 0) 1155 dma_desc_flags |= DMA_EOP; 1156 1157 /* Transmit single SKB or head of fragment list */ 1158 ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring); 1159 if (ret) { 1160 ret = NETDEV_TX_OK; 1161 goto out; 1162 } 1163 1164 /* xmit fragment */ 1165 for (i = 0; i < nr_frags; i++) { 1166 ret = bcmgenet_xmit_frag(dev, 1167 &skb_shinfo(skb)->frags[i], 1168 (i == nr_frags - 1) ? DMA_EOP : 0, ring); 1169 if (ret) { 1170 ret = NETDEV_TX_OK; 1171 goto out; 1172 } 1173 } 1174 1175 skb_tx_timestamp(skb); 1176 1177 /* we kept a software copy of how much we should advance the TDMA 1178 * producer index, now write it down to the hardware 1179 */ 1180 bcmgenet_tdma_ring_writel(priv, ring->index, 1181 ring->prod_index, TDMA_PROD_INDEX); 1182 1183 if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) { 1184 netif_tx_stop_queue(txq); 1185 ring->int_enable(priv, ring); 1186 } 1187 1188out: 1189 spin_unlock_irqrestore(&ring->lock, flags); 1190 1191 return ret; 1192} 1193 1194 1195static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, 1196 struct enet_cb *cb) 1197{ 1198 struct device *kdev = &priv->pdev->dev; 1199 struct sk_buff *skb; 1200 dma_addr_t mapping; 1201 int ret; 1202 1203 skb = netdev_alloc_skb(priv->dev, 1204 priv->rx_buf_len + SKB_ALIGNMENT); 1205 if (!skb) 1206 return -ENOMEM; 1207 1208 /* a caller did not release this control block */ 1209 WARN_ON(cb->skb != NULL); 1210 cb->skb = skb; 1211 mapping = dma_map_single(kdev, skb->data, 1212 priv->rx_buf_len, DMA_FROM_DEVICE); 1213 ret = dma_mapping_error(kdev, mapping); 1214 if (ret) { 1215 bcmgenet_free_cb(cb); 1216 netif_err(priv, rx_err, priv->dev, 1217 "%s DMA map failed\n", __func__); 1218 return ret; 1219 } 1220 1221 dma_unmap_addr_set(cb, dma_addr, mapping); 1222 /* assign packet, prepare descriptor, and advance pointer */ 1223 1224 dmadesc_set_addr(priv, priv->rx_bd_assign_ptr, mapping); 1225 1226 /* turn on the newly assigned BD for DMA to use */ 1227 priv->rx_bd_assign_index++; 1228 priv->rx_bd_assign_index &= (priv->num_rx_bds - 1); 1229 1230 priv->rx_bd_assign_ptr = priv->rx_bds + 1231 (priv->rx_bd_assign_index * DMA_DESC_SIZE); 1232 1233 return 0; 1234} 1235 1236/* bcmgenet_desc_rx - descriptor based rx process. 1237 * this could be called from bottom half, or from NAPI polling method. 1238 */ 1239static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, 1240 unsigned int budget) 1241{ 1242 struct net_device *dev = priv->dev; 1243 struct enet_cb *cb; 1244 struct sk_buff *skb; 1245 u32 dma_length_status; 1246 unsigned long dma_flag; 1247 int len, err; 1248 unsigned int rxpktprocessed = 0, rxpkttoprocess; 1249 unsigned int p_index; 1250 unsigned int chksum_ok = 0; 1251 1252 p_index = bcmgenet_rdma_ring_readl(priv, 1253 DESC_INDEX, RDMA_PROD_INDEX); 1254 p_index &= DMA_P_INDEX_MASK; 1255 1256 if (p_index < priv->rx_c_index) 1257 rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - 1258 priv->rx_c_index + p_index; 1259 else 1260 rxpkttoprocess = p_index - priv->rx_c_index; 1261 1262 netif_dbg(priv, rx_status, dev, 1263 "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess); 1264 1265 while ((rxpktprocessed < rxpkttoprocess) && 1266 (rxpktprocessed < budget)) { 1267 1268 /* Unmap the packet contents such that we can use the 1269 * RSV from the 64 bytes descriptor when enabled and save 1270 * a 32-bits register read 1271 */ 1272 cb = &priv->rx_cbs[priv->rx_read_ptr]; 1273 skb = cb->skb; 1274 dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr), 1275 priv->rx_buf_len, DMA_FROM_DEVICE); 1276 1277 if (!priv->desc_64b_en) { 1278 dma_length_status = dmadesc_get_length_status(priv, 1279 priv->rx_bds + 1280 (priv->rx_read_ptr * 1281 DMA_DESC_SIZE)); 1282 } else { 1283 struct status_64 *status; 1284 status = (struct status_64 *)skb->data; 1285 dma_length_status = status->length_status; 1286 } 1287 1288 /* DMA flags and length are still valid no matter how 1289 * we got the Receive Status Vector (64B RSB or register) 1290 */ 1291 dma_flag = dma_length_status & 0xffff; 1292 len = dma_length_status >> DMA_BUFLENGTH_SHIFT; 1293 1294 netif_dbg(priv, rx_status, dev, 1295 "%s: p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n", 1296 __func__, p_index, priv->rx_c_index, priv->rx_read_ptr, 1297 dma_length_status); 1298 1299 rxpktprocessed++; 1300 1301 priv->rx_read_ptr++; 1302 priv->rx_read_ptr &= (priv->num_rx_bds - 1); 1303 1304 /* out of memory, just drop packets at the hardware level */ 1305 if (unlikely(!skb)) { 1306 dev->stats.rx_dropped++; 1307 dev->stats.rx_errors++; 1308 goto refill; 1309 } 1310 1311 if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { 1312 netif_err(priv, rx_status, dev, 1313 "Droping fragmented packet!\n"); 1314 dev->stats.rx_dropped++; 1315 dev->stats.rx_errors++; 1316 dev_kfree_skb_any(cb->skb); 1317 cb->skb = NULL; 1318 goto refill; 1319 } 1320 /* report errors */ 1321 if (unlikely(dma_flag & (DMA_RX_CRC_ERROR | 1322 DMA_RX_OV | 1323 DMA_RX_NO | 1324 DMA_RX_LG | 1325 DMA_RX_RXER))) { 1326 netif_err(priv, rx_status, dev, "dma_flag=0x%x\n", 1327 (unsigned int)dma_flag); 1328 if (dma_flag & DMA_RX_CRC_ERROR) 1329 dev->stats.rx_crc_errors++; 1330 if (dma_flag & DMA_RX_OV) 1331 dev->stats.rx_over_errors++; 1332 if (dma_flag & DMA_RX_NO) 1333 dev->stats.rx_frame_errors++; 1334 if (dma_flag & DMA_RX_LG) 1335 dev->stats.rx_length_errors++; 1336 dev->stats.rx_dropped++; 1337 dev->stats.rx_errors++; 1338 1339 /* discard the packet and advance consumer index.*/ 1340 dev_kfree_skb_any(cb->skb); 1341 cb->skb = NULL; 1342 goto refill; 1343 } /* error packet */ 1344 1345 chksum_ok = (dma_flag & priv->dma_rx_chk_bit) && 1346 priv->desc_rxchk_en; 1347 1348 skb_put(skb, len); 1349 if (priv->desc_64b_en) { 1350 skb_pull(skb, 64); 1351 len -= 64; 1352 } 1353 1354 if (likely(chksum_ok)) 1355 skb->ip_summed = CHECKSUM_UNNECESSARY; 1356 1357 /* remove hardware 2bytes added for IP alignment */ 1358 skb_pull(skb, 2); 1359 len -= 2; 1360 1361 if (priv->crc_fwd_en) { 1362 skb_trim(skb, len - ETH_FCS_LEN); 1363 len -= ETH_FCS_LEN; 1364 } 1365 1366 /*Finish setting up the received SKB and send it to the kernel*/ 1367 skb->protocol = eth_type_trans(skb, priv->dev); 1368 dev->stats.rx_packets++; 1369 dev->stats.rx_bytes += len; 1370 if (dma_flag & DMA_RX_MULT) 1371 dev->stats.multicast++; 1372 1373 /* Notify kernel */ 1374 napi_gro_receive(&priv->napi, skb); 1375 cb->skb = NULL; 1376 netif_dbg(priv, rx_status, dev, "pushed up to kernel\n"); 1377 1378 /* refill RX path on the current control block */ 1379refill: 1380 err = bcmgenet_rx_refill(priv, cb); 1381 if (err) 1382 netif_err(priv, rx_err, dev, "Rx refill failed\n"); 1383 } 1384 1385 return rxpktprocessed; 1386} 1387 1388/* Assign skb to RX DMA descriptor. */ 1389static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv) 1390{ 1391 struct enet_cb *cb; 1392 int ret = 0; 1393 int i; 1394 1395 netif_dbg(priv, hw, priv->dev, "%s:\n", __func__); 1396 1397 /* loop here for each buffer needing assign */ 1398 for (i = 0; i < priv->num_rx_bds; i++) { 1399 cb = &priv->rx_cbs[priv->rx_bd_assign_index]; 1400 if (cb->skb) 1401 continue; 1402 1403 /* set the DMA descriptor length once and for all 1404 * it will only change if we support dynamically sizing 1405 * priv->rx_buf_len, but we do not 1406 */ 1407 dmadesc_set_length_status(priv, priv->rx_bd_assign_ptr, 1408 priv->rx_buf_len << DMA_BUFLENGTH_SHIFT); 1409 1410 ret = bcmgenet_rx_refill(priv, cb); 1411 if (ret) 1412 break; 1413 1414 } 1415 1416 return ret; 1417} 1418 1419static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) 1420{ 1421 struct enet_cb *cb; 1422 int i; 1423 1424 for (i = 0; i < priv->num_rx_bds; i++) { 1425 cb = &priv->rx_cbs[i]; 1426 1427 if (dma_unmap_addr(cb, dma_addr)) { 1428 dma_unmap_single(&priv->dev->dev, 1429 dma_unmap_addr(cb, dma_addr), 1430 priv->rx_buf_len, DMA_FROM_DEVICE); 1431 dma_unmap_addr_set(cb, dma_addr, 0); 1432 } 1433 1434 if (cb->skb) 1435 bcmgenet_free_cb(cb); 1436 } 1437} 1438 1439static int reset_umac(struct bcmgenet_priv *priv) 1440{ 1441 struct device *kdev = &priv->pdev->dev; 1442 unsigned int timeout = 0; 1443 u32 reg; 1444 1445 /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */ 1446 bcmgenet_rbuf_ctrl_set(priv, 0); 1447 udelay(10); 1448 1449 /* disable MAC while updating its registers */ 1450 bcmgenet_umac_writel(priv, 0, UMAC_CMD); 1451 1452 /* issue soft reset, wait for it to complete */ 1453 bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD); 1454 while (timeout++ < 1000) { 1455 reg = bcmgenet_umac_readl(priv, UMAC_CMD); 1456 if (!(reg & CMD_SW_RESET)) 1457 return 0; 1458 1459 udelay(1); 1460 } 1461 1462 if (timeout == 1000) { 1463 dev_err(kdev, 1464 "timeout waiting for MAC to come out of resetn\n"); 1465 return -ETIMEDOUT; 1466 } 1467 1468 return 0; 1469} 1470 1471static int init_umac(struct bcmgenet_priv *priv) 1472{ 1473 struct device *kdev = &priv->pdev->dev; 1474 int ret; 1475 u32 reg, cpu_mask_clear; 1476 1477 dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); 1478 1479 ret = reset_umac(priv); 1480 if (ret) 1481 return ret; 1482 1483 bcmgenet_umac_writel(priv, 0, UMAC_CMD); 1484 /* clear tx/rx counter */ 1485 bcmgenet_umac_writel(priv, 1486 MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, UMAC_MIB_CTRL); 1487 bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL); 1488 1489 bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); 1490 1491 /* init rx registers, enable ip header optimization */ 1492 reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL); 1493 reg |= RBUF_ALIGN_2B; 1494 bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL); 1495 1496 if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) 1497 bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL); 1498 1499 /* Mask all interrupts.*/ 1500 bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); 1501 bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); 1502 bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); 1503 1504 cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE; 1505 1506 dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__); 1507 1508 /* Monitor cable plug/unpluged event for internal PHY */ 1509 if (phy_is_internal(priv->phydev)) 1510 cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP); 1511 else if (priv->ext_phy) 1512 cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP); 1513 else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { 1514 reg = bcmgenet_bp_mc_get(priv); 1515 reg |= BIT(priv->hw_params->bp_in_en_shift); 1516 1517 /* bp_mask: back pressure mask */ 1518 if (netif_is_multiqueue(priv->dev)) 1519 reg |= priv->hw_params->bp_in_mask; 1520 else 1521 reg &= ~priv->hw_params->bp_in_mask; 1522 bcmgenet_bp_mc_set(priv, reg); 1523 } 1524 1525 /* Enable MDIO interrupts on GENET v3+ */ 1526 if (priv->hw_params->flags & GENET_HAS_MDIO_INTR) 1527 cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR; 1528 1529 bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, 1530 INTRL2_CPU_MASK_CLEAR); 1531 1532 /* Enable rx/tx engine.*/ 1533 dev_dbg(kdev, "done init umac\n"); 1534 1535 return 0; 1536} 1537 1538/* Initialize all house-keeping variables for a TX ring, along 1539 * with corresponding hardware registers 1540 */ 1541static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, 1542 unsigned int index, unsigned int size, 1543 unsigned int write_ptr, unsigned int end_ptr) 1544{ 1545 struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; 1546 u32 words_per_bd = WORDS_PER_BD(priv); 1547 u32 flow_period_val = 0; 1548 unsigned int first_bd; 1549 1550 spin_lock_init(&ring->lock); 1551 ring->index = index; 1552 if (index == DESC_INDEX) { 1553 ring->queue = 0; 1554 ring->int_enable = bcmgenet_tx_ring16_int_enable; 1555 ring->int_disable = bcmgenet_tx_ring16_int_disable; 1556 } else { 1557 ring->queue = index + 1; 1558 ring->int_enable = bcmgenet_tx_ring_int_enable; 1559 ring->int_disable = bcmgenet_tx_ring_int_disable; 1560 } 1561 ring->cbs = priv->tx_cbs + write_ptr; 1562 ring->size = size; 1563 ring->c_index = 0; 1564 ring->free_bds = size; 1565 ring->write_ptr = write_ptr; 1566 ring->cb_ptr = write_ptr; 1567 ring->end_ptr = end_ptr - 1; 1568 ring->prod_index = 0; 1569 1570 /* Set flow period for ring != 16 */ 1571 if (index != DESC_INDEX) 1572 flow_period_val = ENET_MAX_MTU_SIZE << 16; 1573 1574 bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX); 1575 bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX); 1576 bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); 1577 /* Disable rate control for now */ 1578 bcmgenet_tdma_ring_writel(priv, index, flow_period_val, 1579 TDMA_FLOW_PERIOD); 1580 /* Unclassified traffic goes to ring 16 */ 1581 bcmgenet_tdma_ring_writel(priv, index, 1582 ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH), 1583 DMA_RING_BUF_SIZE); 1584 1585 first_bd = write_ptr; 1586 1587 /* Set start and end address, read and write pointers */ 1588 bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd, 1589 DMA_START_ADDR); 1590 bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd, 1591 TDMA_READ_PTR); 1592 bcmgenet_tdma_ring_writel(priv, index, first_bd, 1593 TDMA_WRITE_PTR); 1594 bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, 1595 DMA_END_ADDR); 1596} 1597 1598/* Initialize a RDMA ring */ 1599static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, 1600 unsigned int index, unsigned int size) 1601{ 1602 u32 words_per_bd = WORDS_PER_BD(priv); 1603 int ret; 1604 1605 priv->num_rx_bds = TOTAL_DESC; 1606 priv->rx_bds = priv->base + priv->hw_params->rdma_offset; 1607 priv->rx_bd_assign_ptr = priv->rx_bds; 1608 priv->rx_bd_assign_index = 0; 1609 priv->rx_c_index = 0; 1610 priv->rx_read_ptr = 0; 1611 priv->rx_cbs = kzalloc(priv->num_rx_bds * sizeof(struct enet_cb), 1612 GFP_KERNEL); 1613 if (!priv->rx_cbs) 1614 return -ENOMEM; 1615 1616 ret = bcmgenet_alloc_rx_buffers(priv); 1617 if (ret) { 1618 kfree(priv->rx_cbs); 1619 return ret; 1620 } 1621 1622 bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_WRITE_PTR); 1623 bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX); 1624 bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX); 1625 bcmgenet_rdma_ring_writel(priv, index, 1626 ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH), 1627 DMA_RING_BUF_SIZE); 1628 bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR); 1629 bcmgenet_rdma_ring_writel(priv, index, 1630 words_per_bd * size - 1, DMA_END_ADDR); 1631 bcmgenet_rdma_ring_writel(priv, index, 1632 (DMA_FC_THRESH_LO << DMA_XOFF_THRESHOLD_SHIFT) | 1633 DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH); 1634 bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR); 1635 1636 return ret; 1637} 1638 1639/* init multi xmit queues, only available for GENET2+ 1640 * the queue is partitioned as follows: 1641 * 1642 * queue 0 - 3 is priority based, each one has 32 descriptors, 1643 * with queue 0 being the highest priority queue. 1644 * 1645 * queue 16 is the default tx queue with GENET_DEFAULT_BD_CNT 1646 * descriptors: 256 - (number of tx queues * bds per queues) = 128 1647 * descriptors. 1648 * 1649 * The transmit control block pool is then partitioned as following: 1650 * - tx_cbs[0...127] are for queue 16 1651 * - tx_ring_cbs[0] points to tx_cbs[128..159] 1652 * - tx_ring_cbs[1] points to tx_cbs[160..191] 1653 * - tx_ring_cbs[2] points to tx_cbs[192..223] 1654 * - tx_ring_cbs[3] points to tx_cbs[224..255] 1655 */ 1656static void bcmgenet_init_multiq(struct net_device *dev) 1657{ 1658 struct bcmgenet_priv *priv = netdev_priv(dev); 1659 unsigned int i, dma_enable; 1660 u32 reg, dma_ctrl, ring_cfg = 0, dma_priority = 0; 1661 1662 if (!netif_is_multiqueue(dev)) { 1663 netdev_warn(dev, "called with non multi queue aware HW\n"); 1664 return; 1665 } 1666 1667 dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL); 1668 dma_enable = dma_ctrl & DMA_EN; 1669 dma_ctrl &= ~DMA_EN; 1670 bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); 1671 1672 /* Enable strict priority arbiter mode */ 1673 bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL); 1674 1675 for (i = 0; i < priv->hw_params->tx_queues; i++) { 1676 /* first 64 tx_cbs are reserved for default tx queue 1677 * (ring 16) 1678 */ 1679 bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt, 1680 i * priv->hw_params->bds_cnt, 1681 (i + 1) * priv->hw_params->bds_cnt); 1682 1683 /* Configure ring as decriptor ring and setup priority */ 1684 ring_cfg |= 1 << i; 1685 dma_priority |= ((GENET_Q0_PRIORITY + i) << 1686 (GENET_MAX_MQ_CNT + 1) * i); 1687 dma_ctrl |= 1 << (i + DMA_RING_BUF_EN_SHIFT); 1688 } 1689 1690 /* Enable rings */ 1691 reg = bcmgenet_tdma_readl(priv, DMA_RING_CFG); 1692 reg |= ring_cfg; 1693 bcmgenet_tdma_writel(priv, reg, DMA_RING_CFG); 1694 1695 /* Use configured rings priority and set ring #16 priority */ 1696 reg = bcmgenet_tdma_readl(priv, DMA_RING_PRIORITY); 1697 reg |= ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << 20); 1698 reg |= dma_priority; 1699 bcmgenet_tdma_writel(priv, reg, DMA_PRIORITY); 1700 1701 /* Configure ring as descriptor ring and re-enable DMA if enabled */ 1702 reg = bcmgenet_tdma_readl(priv, DMA_CTRL); 1703 reg |= dma_ctrl; 1704 if (dma_enable) 1705 reg |= DMA_EN; 1706 bcmgenet_tdma_writel(priv, reg, DMA_CTRL); 1707} 1708 1709static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) 1710{ 1711 int i; 1712 1713 /* disable DMA */ 1714 bcmgenet_rdma_writel(priv, 0, DMA_CTRL); 1715 bcmgenet_tdma_writel(priv, 0, DMA_CTRL); 1716 1717 for (i = 0; i < priv->num_tx_bds; i++) { 1718 if (priv->tx_cbs[i].skb != NULL) { 1719 dev_kfree_skb(priv->tx_cbs[i].skb); 1720 priv->tx_cbs[i].skb = NULL; 1721 } 1722 } 1723 1724 bcmgenet_free_rx_buffers(priv); 1725 kfree(priv->rx_cbs); 1726 kfree(priv->tx_cbs); 1727} 1728 1729/* init_edma: Initialize DMA control register */ 1730static int bcmgenet_init_dma(struct bcmgenet_priv *priv) 1731{ 1732 int ret; 1733 1734 netif_dbg(priv, hw, priv->dev, "bcmgenet: init_edma\n"); 1735 1736 /* by default, enable ring 16 (descriptor based) */ 1737 ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, TOTAL_DESC); 1738 if (ret) { 1739 netdev_err(priv->dev, "failed to initialize RX ring\n"); 1740 return ret; 1741 } 1742 1743 /* init rDma */ 1744 bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); 1745 1746 /* Init tDma */ 1747 bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); 1748 1749 /* Initialize commont TX ring structures */ 1750 priv->tx_bds = priv->base + priv->hw_params->tdma_offset; 1751 priv->num_tx_bds = TOTAL_DESC; 1752 priv->tx_cbs = kzalloc(priv->num_tx_bds * sizeof(struct enet_cb), 1753 GFP_KERNEL); 1754 if (!priv->tx_cbs) { 1755 bcmgenet_fini_dma(priv); 1756 return -ENOMEM; 1757 } 1758 1759 /* initialize multi xmit queue */ 1760 bcmgenet_init_multiq(priv->dev); 1761 1762 /* initialize special ring 16 */ 1763 bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT, 1764 priv->hw_params->tx_queues * priv->hw_params->bds_cnt, 1765 TOTAL_DESC); 1766 1767 return 0; 1768} 1769 1770/* NAPI polling method*/ 1771static int bcmgenet_poll(struct napi_struct *napi, int budget) 1772{ 1773 struct bcmgenet_priv *priv = container_of(napi, 1774 struct bcmgenet_priv, napi); 1775 unsigned int work_done; 1776 1777 /* tx reclaim */ 1778 bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]); 1779 1780 work_done = bcmgenet_desc_rx(priv, budget); 1781 1782 /* Advancing our consumer index*/ 1783 priv->rx_c_index += work_done; 1784 priv->rx_c_index &= DMA_C_INDEX_MASK; 1785 bcmgenet_rdma_ring_writel(priv, DESC_INDEX, 1786 priv->rx_c_index, RDMA_CONS_INDEX); 1787 if (work_done < budget) { 1788 napi_complete(napi); 1789 bcmgenet_intrl2_0_writel(priv, 1790 UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_CLEAR); 1791 } 1792 1793 return work_done; 1794} 1795 1796/* Interrupt bottom half */ 1797static void bcmgenet_irq_task(struct work_struct *work) 1798{ 1799 struct bcmgenet_priv *priv = container_of( 1800 work, struct bcmgenet_priv, bcmgenet_irq_work); 1801 1802 netif_dbg(priv, intr, priv->dev, "%s\n", __func__); 1803 1804 /* Link UP/DOWN event */ 1805 if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && 1806 (priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) { 1807 phy_mac_interrupt(priv->phydev, 1808 priv->irq0_stat & UMAC_IRQ_LINK_UP); 1809 priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN); 1810 } 1811} 1812 1813/* bcmgenet_isr1: interrupt handler for ring buffer. */ 1814static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) 1815{ 1816 struct bcmgenet_priv *priv = dev_id; 1817 unsigned int index; 1818 1819 /* Save irq status for bottom-half processing. */ 1820 priv->irq1_stat = 1821 bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & 1822 ~priv->int1_mask; 1823 /* clear inerrupts*/ 1824 bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR); 1825 1826 netif_dbg(priv, intr, priv->dev, 1827 "%s: IRQ=0x%x\n", __func__, priv->irq1_stat); 1828 /* Check the MBDONE interrupts. 1829 * packet is done, reclaim descriptors 1830 */ 1831 if (priv->irq1_stat & 0x0000ffff) { 1832 index = 0; 1833 for (index = 0; index < 16; index++) { 1834 if (priv->irq1_stat & (1 << index)) 1835 bcmgenet_tx_reclaim(priv->dev, 1836 &priv->tx_rings[index]); 1837 } 1838 } 1839 return IRQ_HANDLED; 1840} 1841 1842/* bcmgenet_isr0: Handle various interrupts. */ 1843static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) 1844{ 1845 struct bcmgenet_priv *priv = dev_id; 1846 1847 /* Save irq status for bottom-half processing. */ 1848 priv->irq0_stat = 1849 bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) & 1850 ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); 1851 /* clear inerrupts*/ 1852 bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR); 1853 1854 netif_dbg(priv, intr, priv->dev, 1855 "IRQ=0x%x\n", priv->irq0_stat); 1856 1857 if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) { 1858 /* We use NAPI(software interrupt throttling, if 1859 * Rx Descriptor throttling is not used. 1860 * Disable interrupt, will be enabled in the poll method. 1861 */ 1862 if (likely(napi_schedule_prep(&priv->napi))) { 1863 bcmgenet_intrl2_0_writel(priv, 1864 UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_SET); 1865 __napi_schedule(&priv->napi); 1866 } 1867 } 1868 if (priv->irq0_stat & 1869 (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) { 1870 /* Tx reclaim */ 1871 bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]); 1872 } 1873 if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R | 1874 UMAC_IRQ_PHY_DET_F | 1875 UMAC_IRQ_LINK_UP | 1876 UMAC_IRQ_LINK_DOWN | 1877 UMAC_IRQ_HFB_SM | 1878 UMAC_IRQ_HFB_MM | 1879 UMAC_IRQ_MPD_R)) { 1880 /* all other interested interrupts handled in bottom half */ 1881 schedule_work(&priv->bcmgenet_irq_work); 1882 } 1883 1884 if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && 1885 priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) { 1886 priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR); 1887 wake_up(&priv->wq); 1888 } 1889 1890 return IRQ_HANDLED; 1891} 1892 1893static void bcmgenet_umac_reset(struct bcmgenet_priv *priv) 1894{ 1895 u32 reg; 1896 1897 reg = bcmgenet_rbuf_ctrl_get(priv); 1898 reg |= BIT(1); 1899 bcmgenet_rbuf_ctrl_set(priv, reg); 1900 udelay(10); 1901 1902 reg &= ~BIT(1); 1903 bcmgenet_rbuf_ctrl_set(priv, reg); 1904 udelay(10); 1905} 1906 1907static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv, 1908 unsigned char *addr) 1909{ 1910 bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) | 1911 (addr[2] << 8) | addr[3], UMAC_MAC0); 1912 bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1); 1913} 1914 1915static int bcmgenet_wol_resume(struct bcmgenet_priv *priv) 1916{ 1917 int ret; 1918 1919 /* From WOL-enabled suspend, switch to regular clock */ 1920 clk_disable(priv->clk_wol); 1921 /* init umac registers to synchronize s/w with h/w */ 1922 ret = init_umac(priv); 1923 if (ret) 1924 return ret; 1925 1926 phy_init_hw(priv->phydev); 1927 /* Speed settings must be restored */ 1928 bcmgenet_mii_config(priv->dev); 1929 1930 return 0; 1931} 1932 1933/* Returns a reusable dma control register value */ 1934static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv) 1935{ 1936 u32 reg; 1937 u32 dma_ctrl; 1938 1939 /* disable DMA */ 1940 dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; 1941 reg = bcmgenet_tdma_readl(priv, DMA_CTRL); 1942 reg &= ~dma_ctrl; 1943 bcmgenet_tdma_writel(priv, reg, DMA_CTRL); 1944 1945 reg = bcmgenet_rdma_readl(priv, DMA_CTRL); 1946 reg &= ~dma_ctrl; 1947 bcmgenet_rdma_writel(priv, reg, DMA_CTRL); 1948 1949 bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH); 1950 udelay(10); 1951 bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH); 1952 1953 return dma_ctrl; 1954} 1955 1956static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl) 1957{ 1958 u32 reg; 1959 1960 reg = bcmgenet_rdma_readl(priv, DMA_CTRL); 1961 reg |= dma_ctrl; 1962 bcmgenet_rdma_writel(priv, reg, DMA_CTRL); 1963 1964 reg = bcmgenet_tdma_readl(priv, DMA_CTRL); 1965 reg |= dma_ctrl; 1966 bcmgenet_tdma_writel(priv, reg, DMA_CTRL); 1967} 1968 1969static int bcmgenet_open(struct net_device *dev) 1970{ 1971 struct bcmgenet_priv *priv = netdev_priv(dev); 1972 unsigned long dma_ctrl; 1973 u32 reg; 1974 int ret; 1975 1976 netif_dbg(priv, ifup, dev, "bcmgenet_open\n"); 1977 1978 /* Turn on the clock */ 1979 if (!IS_ERR(priv->clk)) 1980 clk_prepare_enable(priv->clk); 1981 1982 /* take MAC out of reset */ 1983 bcmgenet_umac_reset(priv); 1984 1985 ret = init_umac(priv); 1986 if (ret) 1987 goto err_clk_disable; 1988 1989 /* disable ethernet MAC while updating its registers */ 1990 reg = bcmgenet_umac_readl(priv, UMAC_CMD); 1991 reg &= ~(CMD_TX_EN | CMD_RX_EN); 1992 bcmgenet_umac_writel(priv, reg, UMAC_CMD); 1993 1994 bcmgenet_set_hw_addr(priv, dev->dev_addr); 1995 1996 if (priv->wol_enabled) { 1997 ret = bcmgenet_wol_resume(priv); 1998 if (ret) 1999 return ret; 2000 } 2001 2002 if (phy_is_internal(priv->phydev)) { 2003 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); 2004 reg |= EXT_ENERGY_DET_MASK; 2005 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); 2006 } 2007 2008 /* Disable RX/TX DMA and flush TX queues */ 2009 dma_ctrl = bcmgenet_dma_disable(priv); 2010 2011 /* Reinitialize TDMA and RDMA and SW housekeeping */ 2012 ret = bcmgenet_init_dma(priv); 2013 if (ret) { 2014 netdev_err(dev, "failed to initialize DMA\n"); 2015 goto err_fini_dma; 2016 } 2017 2018 /* Always enable ring 16 - descriptor ring */ 2019 bcmgenet_enable_dma(priv, dma_ctrl); 2020 2021 ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED, 2022 dev->name, priv); 2023 if (ret < 0) { 2024 netdev_err(dev, "can't request IRQ %d\n", priv->irq0); 2025 goto err_fini_dma; 2026 } 2027 2028 ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED, 2029 dev->name, priv); 2030 if (ret < 0) { 2031 netdev_err(dev, "can't request IRQ %d\n", priv->irq1); 2032 goto err_irq0; 2033 } 2034 2035 /* Start the network engine */ 2036 napi_enable(&priv->napi); 2037 2038 reg = bcmgenet_umac_readl(priv, UMAC_CMD); 2039 reg |= (CMD_TX_EN | CMD_RX_EN); 2040 bcmgenet_umac_writel(priv, reg, UMAC_CMD); 2041 2042 /* Make sure we reflect the value of CRC_CMD_FWD */ 2043 priv->crc_fwd_en = !!(reg & CMD_CRC_FWD); 2044 2045 device_set_wakeup_capable(&dev->dev, 1); 2046 2047 if (phy_is_internal(priv->phydev)) 2048 bcmgenet_power_up(priv, GENET_POWER_PASSIVE); 2049 2050 netif_tx_start_all_queues(dev); 2051 2052 phy_start(priv->phydev); 2053 2054 return 0; 2055 2056err_irq0: 2057 free_irq(priv->irq0, dev); 2058err_fini_dma: 2059 bcmgenet_fini_dma(priv); 2060err_clk_disable: 2061 if (!IS_ERR(priv->clk)) 2062 clk_disable_unprepare(priv->clk); 2063 return ret; 2064} 2065 2066static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) 2067{ 2068 int ret = 0; 2069 int timeout = 0; 2070 u32 reg; 2071 2072 /* Disable TDMA to stop add more frames in TX DMA */ 2073 reg = bcmgenet_tdma_readl(priv, DMA_CTRL); 2074 reg &= ~DMA_EN; 2075 bcmgenet_tdma_writel(priv, reg, DMA_CTRL); 2076 2077 /* Check TDMA status register to confirm TDMA is disabled */ 2078 while (timeout++ < DMA_TIMEOUT_VAL) { 2079 reg = bcmgenet_tdma_readl(priv, DMA_STATUS); 2080 if (reg & DMA_DISABLED) 2081 break; 2082 2083 udelay(1); 2084 } 2085 2086 if (timeout == DMA_TIMEOUT_VAL) { 2087 netdev_warn(priv->dev, 2088 "Timed out while disabling TX DMA\n"); 2089 ret = -ETIMEDOUT; 2090 } 2091 2092 /* Wait 10ms for packet drain in both tx and rx dma */ 2093 usleep_range(10000, 20000); 2094 2095 /* Disable RDMA */ 2096 reg = bcmgenet_rdma_readl(priv, DMA_CTRL); 2097 reg &= ~DMA_EN; 2098 bcmgenet_rdma_writel(priv, reg, DMA_CTRL); 2099 2100 timeout = 0; 2101 /* Check RDMA status register to confirm RDMA is disabled */ 2102 while (timeout++ < DMA_TIMEOUT_VAL) { 2103 reg = bcmgenet_rdma_readl(priv, DMA_STATUS); 2104 if (reg & DMA_DISABLED) 2105 break; 2106 2107 udelay(1); 2108 } 2109 2110 if (timeout == DMA_TIMEOUT_VAL) { 2111 netdev_warn(priv->dev, 2112 "Timed out while disabling RX DMA\n"); 2113 ret = -ETIMEDOUT; 2114 } 2115 2116 return ret; 2117} 2118 2119static int bcmgenet_close(struct net_device *dev) 2120{ 2121 struct bcmgenet_priv *priv = netdev_priv(dev); 2122 int ret; 2123 u32 reg; 2124 2125 netif_dbg(priv, ifdown, dev, "bcmgenet_close\n"); 2126 2127 phy_stop(priv->phydev); 2128 2129 /* Disable MAC receive */ 2130 reg = bcmgenet_umac_readl(priv, UMAC_CMD); 2131 reg &= ~CMD_RX_EN; 2132 bcmgenet_umac_writel(priv, reg, UMAC_CMD); 2133 2134 netif_tx_stop_all_queues(dev); 2135 2136 ret = bcmgenet_dma_teardown(priv); 2137 if (ret) 2138 return ret; 2139 2140 /* Disable MAC transmit. TX DMA disabled have to done before this */ 2141 reg = bcmgenet_umac_readl(priv, UMAC_CMD); 2142 reg &= ~CMD_TX_EN; 2143 bcmgenet_umac_writel(priv, reg, UMAC_CMD); 2144 2145 napi_disable(&priv->napi); 2146 2147 /* tx reclaim */ 2148 bcmgenet_tx_reclaim_all(dev); 2149 bcmgenet_fini_dma(priv); 2150 2151 free_irq(priv->irq0, priv); 2152 free_irq(priv->irq1, priv); 2153 2154 /* Wait for pending work items to complete - we are stopping 2155 * the clock now. Since interrupts are disabled, no new work 2156 * will be scheduled. 2157 */ 2158 cancel_work_sync(&priv->bcmgenet_irq_work); 2159 2160 if (phy_is_internal(priv->phydev)) 2161 bcmgenet_power_down(priv, GENET_POWER_PASSIVE); 2162 2163 if (priv->wol_enabled) 2164 clk_enable(priv->clk_wol); 2165 2166 if (!IS_ERR(priv->clk)) 2167 clk_disable_unprepare(priv->clk); 2168 2169 return 0; 2170} 2171 2172static void bcmgenet_timeout(struct net_device *dev) 2173{ 2174 struct bcmgenet_priv *priv = netdev_priv(dev); 2175 2176 netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n"); 2177 2178 dev->trans_start = jiffies; 2179 2180 dev->stats.tx_errors++; 2181 2182 netif_tx_wake_all_queues(dev); 2183} 2184 2185#define MAX_MC_COUNT 16 2186 2187static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv, 2188 unsigned char *addr, 2189 int *i, 2190 int *mc) 2191{ 2192 u32 reg; 2193 2194 bcmgenet_umac_writel(priv, 2195 addr[0] << 8 | addr[1], UMAC_MDF_ADDR + (*i * 4)); 2196 bcmgenet_umac_writel(priv, 2197 addr[2] << 24 | addr[3] << 16 | 2198 addr[4] << 8 | addr[5], 2199 UMAC_MDF_ADDR + ((*i + 1) * 4)); 2200 reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL); 2201 reg |= (1 << (MAX_MC_COUNT - *mc)); 2202 bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL); 2203 *i += 2; 2204 (*mc)++; 2205} 2206 2207static void bcmgenet_set_rx_mode(struct net_device *dev) 2208{ 2209 struct bcmgenet_priv *priv = netdev_priv(dev); 2210 struct netdev_hw_addr *ha; 2211 int i, mc; 2212 u32 reg; 2213 2214 netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags); 2215 2216 /* Promiscous mode */ 2217 reg = bcmgenet_umac_readl(priv, UMAC_CMD); 2218 if (dev->flags & IFF_PROMISC) { 2219 reg |= CMD_PROMISC; 2220 bcmgenet_umac_writel(priv, reg, UMAC_CMD); 2221 bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL); 2222 return; 2223 } else { 2224 reg &= ~CMD_PROMISC; 2225 bcmgenet_umac_writel(priv, reg, UMAC_CMD); 2226 } 2227 2228 /* UniMac doesn't support ALLMULTI */ 2229 if (dev->flags & IFF_ALLMULTI) { 2230 netdev_warn(dev, "ALLMULTI is not supported\n"); 2231 return; 2232 } 2233 2234 /* update MDF filter */ 2235 i = 0; 2236 mc = 0; 2237 /* Broadcast */ 2238 bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc); 2239 /* my own address.*/ 2240 bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc); 2241 /* Unicast list*/ 2242 if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc)) 2243 return; 2244 2245 if (!netdev_uc_empty(dev)) 2246 netdev_for_each_uc_addr(ha, dev) 2247 bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc); 2248 /* Multicast */ 2249 if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc)) 2250 return; 2251 2252 netdev_for_each_mc_addr(ha, dev) 2253 bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc); 2254} 2255 2256/* Set the hardware MAC address. */ 2257static int bcmgenet_set_mac_addr(struct net_device *dev, void *p) 2258{ 2259 struct sockaddr *addr = p; 2260 2261 /* Setting the MAC address at the hardware level is not possible 2262 * without disabling the UniMAC RX/TX enable bits. 2263 */ 2264 if (netif_running(dev)) 2265 return -EBUSY; 2266 2267 ether_addr_copy(dev->dev_addr, addr->sa_data); 2268 2269 return 0; 2270} 2271 2272static const struct net_device_ops bcmgenet_netdev_ops = { 2273 .ndo_open = bcmgenet_open, 2274 .ndo_stop = bcmgenet_close, 2275 .ndo_start_xmit = bcmgenet_xmit, 2276 .ndo_tx_timeout = bcmgenet_timeout, 2277 .ndo_set_rx_mode = bcmgenet_set_rx_mode, 2278 .ndo_set_mac_address = bcmgenet_set_mac_addr, 2279 .ndo_do_ioctl = bcmgenet_ioctl, 2280 .ndo_set_features = bcmgenet_set_features, 2281}; 2282 2283/* Array of GENET hardware parameters/characteristics */ 2284static struct bcmgenet_hw_params bcmgenet_hw_params[] = { 2285 [GENET_V1] = { 2286 .tx_queues = 0, 2287 .rx_queues = 0, 2288 .bds_cnt = 0, 2289 .bp_in_en_shift = 16, 2290 .bp_in_mask = 0xffff, 2291 .hfb_filter_cnt = 16, 2292 .qtag_mask = 0x1F, 2293 .hfb_offset = 0x1000, 2294 .rdma_offset = 0x2000, 2295 .tdma_offset = 0x3000, 2296 .words_per_bd = 2, 2297 }, 2298 [GENET_V2] = { 2299 .tx_queues = 4, 2300 .rx_queues = 4, 2301 .bds_cnt = 32, 2302 .bp_in_en_shift = 16, 2303 .bp_in_mask = 0xffff, 2304 .hfb_filter_cnt = 16, 2305 .qtag_mask = 0x1F, 2306 .tbuf_offset = 0x0600, 2307 .hfb_offset = 0x1000, 2308 .hfb_reg_offset = 0x2000, 2309 .rdma_offset = 0x3000, 2310 .tdma_offset = 0x4000, 2311 .words_per_bd = 2, 2312 .flags = GENET_HAS_EXT, 2313 }, 2314 [GENET_V3] = { 2315 .tx_queues = 4, 2316 .rx_queues = 4, 2317 .bds_cnt = 32, 2318 .bp_in_en_shift = 17, 2319 .bp_in_mask = 0x1ffff, 2320 .hfb_filter_cnt = 48, 2321 .qtag_mask = 0x3F, 2322 .tbuf_offset = 0x0600, 2323 .hfb_offset = 0x8000, 2324 .hfb_reg_offset = 0xfc00, 2325 .rdma_offset = 0x10000, 2326 .tdma_offset = 0x11000, 2327 .words_per_bd = 2, 2328 .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR, 2329 }, 2330 [GENET_V4] = { 2331 .tx_queues = 4, 2332 .rx_queues = 4, 2333 .bds_cnt = 32, 2334 .bp_in_en_shift = 17, 2335 .bp_in_mask = 0x1ffff, 2336 .hfb_filter_cnt = 48, 2337 .qtag_mask = 0x3F, 2338 .tbuf_offset = 0x0600, 2339 .hfb_offset = 0x8000, 2340 .hfb_reg_offset = 0xfc00, 2341 .rdma_offset = 0x2000, 2342 .tdma_offset = 0x4000, 2343 .words_per_bd = 3, 2344 .flags = GENET_HAS_40BITS | GENET_HAS_EXT | GENET_HAS_MDIO_INTR, 2345 }, 2346}; 2347 2348/* Infer hardware parameters from the detected GENET version */ 2349static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) 2350{ 2351 struct bcmgenet_hw_params *params; 2352 u32 reg; 2353 u8 major; 2354 2355 if (GENET_IS_V4(priv)) { 2356 bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; 2357 genet_dma_ring_regs = genet_dma_ring_regs_v4; 2358 priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS; 2359 priv->version = GENET_V4; 2360 } else if (GENET_IS_V3(priv)) { 2361 bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; 2362 genet_dma_ring_regs = genet_dma_ring_regs_v123; 2363 priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS; 2364 priv->version = GENET_V3; 2365 } else if (GENET_IS_V2(priv)) { 2366 bcmgenet_dma_regs = bcmgenet_dma_regs_v2; 2367 genet_dma_ring_regs = genet_dma_ring_regs_v123; 2368 priv->dma_rx_chk_bit = DMA_RX_CHK_V12; 2369 priv->version = GENET_V2; 2370 } else if (GENET_IS_V1(priv)) { 2371 bcmgenet_dma_regs = bcmgenet_dma_regs_v1; 2372 genet_dma_ring_regs = genet_dma_ring_regs_v123; 2373 priv->dma_rx_chk_bit = DMA_RX_CHK_V12; 2374 priv->version = GENET_V1; 2375 } 2376 2377 /* enum genet_version starts at 1 */ 2378 priv->hw_params = &bcmgenet_hw_params[priv->version]; 2379 params = priv->hw_params; 2380 2381 /* Read GENET HW version */ 2382 reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL); 2383 major = (reg >> 24 & 0x0f); 2384 if (major == 5) 2385 major = 4; 2386 else if (major == 0) 2387 major = 1; 2388 if (major != priv->version) { 2389 dev_err(&priv->pdev->dev, 2390 "GENET version mismatch, got: %d, configured for: %d\n", 2391 major, priv->version); 2392 } 2393 2394 /* Print the GENET core version */ 2395 dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT, 2396 major, (reg >> 16) & 0x0f, reg & 0xffff); 2397 2398#ifdef CONFIG_PHYS_ADDR_T_64BIT 2399 if (!(params->flags & GENET_HAS_40BITS)) 2400 pr_warn("GENET does not support 40-bits PA\n"); 2401#endif 2402 2403 pr_debug("Configuration for version: %d\n" 2404 "TXq: %1d, RXq: %1d, BDs: %1d\n" 2405 "BP << en: %2d, BP msk: 0x%05x\n" 2406 "HFB count: %2d, QTAQ msk: 0x%05x\n" 2407 "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n" 2408 "RDMA: 0x%05x, TDMA: 0x%05x\n" 2409 "Words/BD: %d\n", 2410 priv->version, 2411 params->tx_queues, params->rx_queues, params->bds_cnt, 2412 params->bp_in_en_shift, params->bp_in_mask, 2413 params->hfb_filter_cnt, params->qtag_mask, 2414 params->tbuf_offset, params->hfb_offset, 2415 params->hfb_reg_offset, 2416 params->rdma_offset, params->tdma_offset, 2417 params->words_per_bd); 2418} 2419 2420static const struct of_device_id bcmgenet_match[] = { 2421 { .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 }, 2422 { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 }, 2423 { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 }, 2424 { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 }, 2425 { }, 2426}; 2427 2428static int bcmgenet_probe(struct platform_device *pdev) 2429{ 2430 struct device_node *dn = pdev->dev.of_node; 2431 const struct of_device_id *of_id; 2432 struct bcmgenet_priv *priv; 2433 struct net_device *dev; 2434 const void *macaddr; 2435 struct resource *r; 2436 int err = -EIO; 2437 2438 /* Up to GENET_MAX_MQ_CNT + 1 TX queues and a single RX queue */ 2439 dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, 1); 2440 if (!dev) { 2441 dev_err(&pdev->dev, "can't allocate net device\n"); 2442 return -ENOMEM; 2443 } 2444 2445 of_id = of_match_node(bcmgenet_match, dn); 2446 if (!of_id) 2447 return -EINVAL; 2448 2449 priv = netdev_priv(dev); 2450 priv->irq0 = platform_get_irq(pdev, 0); 2451 priv->irq1 = platform_get_irq(pdev, 1); 2452 if (!priv->irq0 || !priv->irq1) { 2453 dev_err(&pdev->dev, "can't find IRQs\n"); 2454 err = -EINVAL; 2455 goto err; 2456 } 2457 2458 macaddr = of_get_mac_address(dn); 2459 if (!macaddr) { 2460 dev_err(&pdev->dev, "can't find MAC address\n"); 2461 err = -EINVAL; 2462 goto err; 2463 } 2464 2465 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2466 priv->base = devm_ioremap_resource(&pdev->dev, r); 2467 if (IS_ERR(priv->base)) { 2468 err = PTR_ERR(priv->base); 2469 goto err; 2470 } 2471 2472 SET_NETDEV_DEV(dev, &pdev->dev); 2473 dev_set_drvdata(&pdev->dev, dev); 2474 ether_addr_copy(dev->dev_addr, macaddr); 2475 dev->watchdog_timeo = 2 * HZ; 2476 SET_ETHTOOL_OPS(dev, &bcmgenet_ethtool_ops); 2477 dev->netdev_ops = &bcmgenet_netdev_ops; 2478 netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64); 2479 2480 priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT); 2481 2482 /* Set hardware features */ 2483 dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | 2484 NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; 2485 2486 /* Set the needed headroom to account for any possible 2487 * features enabling/disabling at runtime 2488 */ 2489 dev->needed_headroom += 64; 2490 2491 netdev_boot_setup_check(dev); 2492 2493 priv->dev = dev; 2494 priv->pdev = pdev; 2495 priv->version = (enum bcmgenet_version)of_id->data; 2496 2497 bcmgenet_set_hw_params(priv); 2498 2499 /* Mii wait queue */ 2500 init_waitqueue_head(&priv->wq); 2501 /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */ 2502 priv->rx_buf_len = RX_BUF_LENGTH; 2503 INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task); 2504 2505 priv->clk = devm_clk_get(&priv->pdev->dev, "enet"); 2506 if (IS_ERR(priv->clk)) 2507 dev_warn(&priv->pdev->dev, "failed to get enet clock\n"); 2508 2509 priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol"); 2510 if (IS_ERR(priv->clk_wol)) 2511 dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n"); 2512 2513 if (!IS_ERR(priv->clk)) 2514 clk_prepare_enable(priv->clk); 2515 2516 err = reset_umac(priv); 2517 if (err) 2518 goto err_clk_disable; 2519 2520 err = bcmgenet_mii_init(dev); 2521 if (err) 2522 goto err_clk_disable; 2523 2524 /* setup number of real queues + 1 (GENET_V1 has 0 hardware queues 2525 * just the ring 16 descriptor based TX 2526 */ 2527 netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1); 2528 netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1); 2529 2530 err = register_netdev(dev); 2531 if (err) 2532 goto err_clk_disable; 2533 2534 /* Turn off the main clock, WOL clock is handled separately */ 2535 if (!IS_ERR(priv->clk)) 2536 clk_disable_unprepare(priv->clk); 2537 2538 return err; 2539 2540err_clk_disable: 2541 if (!IS_ERR(priv->clk)) 2542 clk_disable_unprepare(priv->clk); 2543err: 2544 free_netdev(dev); 2545 return err; 2546} 2547 2548static int bcmgenet_remove(struct platform_device *pdev) 2549{ 2550 struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev); 2551 2552 dev_set_drvdata(&pdev->dev, NULL); 2553 unregister_netdev(priv->dev); 2554 bcmgenet_mii_exit(priv->dev); 2555 free_netdev(priv->dev); 2556 2557 return 0; 2558} 2559 2560 2561static struct platform_driver bcmgenet_driver = { 2562 .probe = bcmgenet_probe, 2563 .remove = bcmgenet_remove, 2564 .driver = { 2565 .name = "bcmgenet", 2566 .owner = THIS_MODULE, 2567 .of_match_table = bcmgenet_match, 2568 }, 2569}; 2570module_platform_driver(bcmgenet_driver); 2571 2572MODULE_AUTHOR("Broadcom Corporation"); 2573MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver"); 2574MODULE_ALIAS("platform:bcmgenet"); 2575MODULE_LICENSE("GPL"); 2576