1/* 2 * Blackfin On-Chip MAC Driver 3 * 4 * Copyright 2004-2010 Analog Devices Inc. 5 * 6 * Enter bugs at http://blackfin.uclinux.org/ 7 * 8 * Licensed under the GPL-2 or later. 9 */ 10 11#define DRV_VERSION "1.1" 12#define DRV_DESC "Blackfin on-chip Ethernet MAC driver" 13 14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 15 16#include <linux/init.h> 17#include <linux/module.h> 18#include <linux/kernel.h> 19#include <linux/sched.h> 20#include <linux/slab.h> 21#include <linux/delay.h> 22#include <linux/timer.h> 23#include <linux/errno.h> 24#include <linux/irq.h> 25#include <linux/io.h> 26#include <linux/ioport.h> 27#include <linux/crc32.h> 28#include <linux/device.h> 29#include <linux/spinlock.h> 30#include <linux/mii.h> 31#include <linux/netdevice.h> 32#include <linux/etherdevice.h> 33#include <linux/ethtool.h> 34#include <linux/skbuff.h> 35#include <linux/platform_device.h> 36 37#include <asm/dma.h> 38#include <linux/dma-mapping.h> 39 40#include <asm/div64.h> 41#include <asm/dpmc.h> 42#include <asm/blackfin.h> 43#include <asm/cacheflush.h> 44#include <asm/portmux.h> 45#include <mach/pll.h> 46 47#include "bfin_mac.h" 48 49MODULE_AUTHOR("Bryan Wu, Luke Yang"); 50MODULE_LICENSE("GPL"); 51MODULE_DESCRIPTION(DRV_DESC); 52MODULE_ALIAS("platform:bfin_mac"); 53 54#if defined(CONFIG_BFIN_MAC_USE_L1) 55# define bfin_mac_alloc(dma_handle, size, num) l1_data_sram_zalloc(size*num) 56# define bfin_mac_free(dma_handle, ptr, num) l1_data_sram_free(ptr) 57#else 58# define bfin_mac_alloc(dma_handle, size, num) \ 59 dma_alloc_coherent(NULL, size*num, dma_handle, GFP_KERNEL) 60# define bfin_mac_free(dma_handle, ptr, num) \ 61 dma_free_coherent(NULL, sizeof(*ptr)*num, ptr, dma_handle) 62#endif 63 64#define PKT_BUF_SZ 1580 65 66#define MAX_TIMEOUT_CNT 500 67 68/* pointers to maintain transmit list */ 69static struct net_dma_desc_tx *tx_list_head; 70static struct net_dma_desc_tx *tx_list_tail; 71static struct net_dma_desc_rx *rx_list_head; 72static struct net_dma_desc_rx *rx_list_tail; 
73static struct net_dma_desc_rx *current_rx_ptr; 74static struct net_dma_desc_tx *current_tx_ptr; 75static struct net_dma_desc_tx *tx_desc; 76static struct net_dma_desc_rx *rx_desc; 77 78static void desc_list_free(void) 79{ 80 struct net_dma_desc_rx *r; 81 struct net_dma_desc_tx *t; 82 int i; 83#if !defined(CONFIG_BFIN_MAC_USE_L1) 84 dma_addr_t dma_handle = 0; 85#endif 86 87 if (tx_desc) { 88 t = tx_list_head; 89 for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) { 90 if (t) { 91 if (t->skb) { 92 dev_kfree_skb(t->skb); 93 t->skb = NULL; 94 } 95 t = t->next; 96 } 97 } 98 bfin_mac_free(dma_handle, tx_desc, CONFIG_BFIN_TX_DESC_NUM); 99 } 100 101 if (rx_desc) { 102 r = rx_list_head; 103 for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) { 104 if (r) { 105 if (r->skb) { 106 dev_kfree_skb(r->skb); 107 r->skb = NULL; 108 } 109 r = r->next; 110 } 111 } 112 bfin_mac_free(dma_handle, rx_desc, CONFIG_BFIN_RX_DESC_NUM); 113 } 114} 115 116static int desc_list_init(struct net_device *dev) 117{ 118 int i; 119 struct sk_buff *new_skb; 120#if !defined(CONFIG_BFIN_MAC_USE_L1) 121 /* 122 * This dma_handle is useless in Blackfin dma_alloc_coherent(). 123 * The real dma handler is the return value of dma_alloc_coherent(). 
124 */ 125 dma_addr_t dma_handle; 126#endif 127 128 tx_desc = bfin_mac_alloc(&dma_handle, 129 sizeof(struct net_dma_desc_tx), 130 CONFIG_BFIN_TX_DESC_NUM); 131 if (tx_desc == NULL) 132 goto init_error; 133 134 rx_desc = bfin_mac_alloc(&dma_handle, 135 sizeof(struct net_dma_desc_rx), 136 CONFIG_BFIN_RX_DESC_NUM); 137 if (rx_desc == NULL) 138 goto init_error; 139 140 /* init tx_list */ 141 tx_list_head = tx_list_tail = tx_desc; 142 143 for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) { 144 struct net_dma_desc_tx *t = tx_desc + i; 145 struct dma_descriptor *a = &(t->desc_a); 146 struct dma_descriptor *b = &(t->desc_b); 147 148 /* 149 * disable DMA 150 * read from memory WNR = 0 151 * wordsize is 32 bits 152 * 6 half words is desc size 153 * large desc flow 154 */ 155 a->config = WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE; 156 a->start_addr = (unsigned long)t->packet; 157 a->x_count = 0; 158 a->next_dma_desc = b; 159 160 /* 161 * enabled DMA 162 * write to memory WNR = 1 163 * wordsize is 32 bits 164 * disable interrupt 165 * 6 half words is desc size 166 * large desc flow 167 */ 168 b->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE; 169 b->start_addr = (unsigned long)(&(t->status)); 170 b->x_count = 0; 171 172 t->skb = NULL; 173 tx_list_tail->desc_b.next_dma_desc = a; 174 tx_list_tail->next = t; 175 tx_list_tail = t; 176 } 177 tx_list_tail->next = tx_list_head; /* tx_list is a circle */ 178 tx_list_tail->desc_b.next_dma_desc = &(tx_list_head->desc_a); 179 current_tx_ptr = tx_list_head; 180 181 /* init rx_list */ 182 rx_list_head = rx_list_tail = rx_desc; 183 184 for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) { 185 struct net_dma_desc_rx *r = rx_desc + i; 186 struct dma_descriptor *a = &(r->desc_a); 187 struct dma_descriptor *b = &(r->desc_b); 188 189 /* allocate a new skb for next time receive */ 190 new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN); 191 if (!new_skb) { 192 pr_notice("init: low on mem - packet dropped\n"); 193 goto init_error; 194 } 195 
skb_reserve(new_skb, NET_IP_ALIGN); 196 /* Invidate the data cache of skb->data range when it is write back 197 * cache. It will prevent overwritting the new data from DMA 198 */ 199 blackfin_dcache_invalidate_range((unsigned long)new_skb->head, 200 (unsigned long)new_skb->end); 201 r->skb = new_skb; 202 203 /* 204 * enabled DMA 205 * write to memory WNR = 1 206 * wordsize is 32 bits 207 * disable interrupt 208 * 6 half words is desc size 209 * large desc flow 210 */ 211 a->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE; 212 /* since RXDWA is enabled */ 213 a->start_addr = (unsigned long)new_skb->data - 2; 214 a->x_count = 0; 215 a->next_dma_desc = b; 216 217 /* 218 * enabled DMA 219 * write to memory WNR = 1 220 * wordsize is 32 bits 221 * enable interrupt 222 * 6 half words is desc size 223 * large desc flow 224 */ 225 b->config = DMAEN | WNR | WDSIZE_32 | DI_EN | 226 NDSIZE_6 | DMAFLOW_LARGE; 227 b->start_addr = (unsigned long)(&(r->status)); 228 b->x_count = 0; 229 230 rx_list_tail->desc_b.next_dma_desc = a; 231 rx_list_tail->next = r; 232 rx_list_tail = r; 233 } 234 rx_list_tail->next = rx_list_head; /* rx_list is a circle */ 235 rx_list_tail->desc_b.next_dma_desc = &(rx_list_head->desc_a); 236 current_rx_ptr = rx_list_head; 237 238 return 0; 239 240init_error: 241 desc_list_free(); 242 pr_err("kmalloc failed\n"); 243 return -ENOMEM; 244} 245 246 247/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/ 248 249/* 250 * MII operations 251 */ 252/* Wait until the previous MDC/MDIO transaction has completed */ 253static int bfin_mdio_poll(void) 254{ 255 int timeout_cnt = MAX_TIMEOUT_CNT; 256 257 /* poll the STABUSY bit */ 258 while ((bfin_read_EMAC_STAADD()) & STABUSY) { 259 udelay(1); 260 if (timeout_cnt-- < 0) { 261 pr_err("wait MDC/MDIO transaction to complete timeout\n"); 262 return -ETIMEDOUT; 263 } 264 } 265 266 return 0; 267} 268 269/* Read an off-chip register in a PHY through the MDC/MDIO port */ 270static int 
bfin_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum) 271{ 272 int ret; 273 274 ret = bfin_mdio_poll(); 275 if (ret) 276 return ret; 277 278 /* read mode */ 279 bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) | 280 SET_REGAD((u16) regnum) | 281 STABUSY); 282 283 ret = bfin_mdio_poll(); 284 if (ret) 285 return ret; 286 287 return (int) bfin_read_EMAC_STADAT(); 288} 289 290/* Write an off-chip register in a PHY through the MDC/MDIO port */ 291static int bfin_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum, 292 u16 value) 293{ 294 int ret; 295 296 ret = bfin_mdio_poll(); 297 if (ret) 298 return ret; 299 300 bfin_write_EMAC_STADAT((u32) value); 301 302 /* write mode */ 303 bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) | 304 SET_REGAD((u16) regnum) | 305 STAOP | 306 STABUSY); 307 308 return bfin_mdio_poll(); 309} 310 311static int bfin_mdiobus_reset(struct mii_bus *bus) 312{ 313 return 0; 314} 315 316static void bfin_mac_adjust_link(struct net_device *dev) 317{ 318 struct bfin_mac_local *lp = netdev_priv(dev); 319 struct phy_device *phydev = lp->phydev; 320 unsigned long flags; 321 int new_state = 0; 322 323 spin_lock_irqsave(&lp->lock, flags); 324 if (phydev->link) { 325 /* Now we make sure that we can be in full duplex mode. 326 * If not, we operate in half-duplex mode. */ 327 if (phydev->duplex != lp->old_duplex) { 328 u32 opmode = bfin_read_EMAC_OPMODE(); 329 new_state = 1; 330 331 if (phydev->duplex) 332 opmode |= FDMODE; 333 else 334 opmode &= ~(FDMODE); 335 336 bfin_write_EMAC_OPMODE(opmode); 337 lp->old_duplex = phydev->duplex; 338 } 339 340 if (phydev->speed != lp->old_speed) { 341 if (phydev->interface == PHY_INTERFACE_MODE_RMII) { 342 u32 opmode = bfin_read_EMAC_OPMODE(); 343 switch (phydev->speed) { 344 case 10: 345 opmode |= RMII_10; 346 break; 347 case 100: 348 opmode &= ~RMII_10; 349 break; 350 default: 351 netdev_warn(dev, 352 "Ack! 
Speed (%d) is not 10/100!\n", 353 phydev->speed); 354 break; 355 } 356 bfin_write_EMAC_OPMODE(opmode); 357 } 358 359 new_state = 1; 360 lp->old_speed = phydev->speed; 361 } 362 363 if (!lp->old_link) { 364 new_state = 1; 365 lp->old_link = 1; 366 } 367 } else if (lp->old_link) { 368 new_state = 1; 369 lp->old_link = 0; 370 lp->old_speed = 0; 371 lp->old_duplex = -1; 372 } 373 374 if (new_state) { 375 u32 opmode = bfin_read_EMAC_OPMODE(); 376 phy_print_status(phydev); 377 pr_debug("EMAC_OPMODE = 0x%08x\n", opmode); 378 } 379 380 spin_unlock_irqrestore(&lp->lock, flags); 381} 382 383/* MDC = 2.5 MHz */ 384#define MDC_CLK 2500000 385 386static int mii_probe(struct net_device *dev, int phy_mode) 387{ 388 struct bfin_mac_local *lp = netdev_priv(dev); 389 struct phy_device *phydev = NULL; 390 unsigned short sysctl; 391 int i; 392 u32 sclk, mdc_div; 393 394 /* Enable PHY output early */ 395 if (!(bfin_read_VR_CTL() & CLKBUFOE)) 396 bfin_write_VR_CTL(bfin_read_VR_CTL() | CLKBUFOE); 397 398 sclk = get_sclk(); 399 mdc_div = ((sclk / MDC_CLK) / 2) - 1; 400 401 sysctl = bfin_read_EMAC_SYSCTL(); 402 sysctl = (sysctl & ~MDCDIV) | SET_MDCDIV(mdc_div); 403 bfin_write_EMAC_SYSCTL(sysctl); 404 405 /* search for connected PHY device */ 406 for (i = 0; i < PHY_MAX_ADDR; ++i) { 407 struct phy_device *const tmp_phydev = lp->mii_bus->phy_map[i]; 408 409 if (!tmp_phydev) 410 continue; /* no PHY here... */ 411 412 phydev = tmp_phydev; 413 break; /* found it */ 414 } 415 416 /* now we are supposed to have a proper phydev, to attach to... 
*/ 417 if (!phydev) { 418 netdev_err(dev, "no phy device found\n"); 419 return -ENODEV; 420 } 421 422 if (phy_mode != PHY_INTERFACE_MODE_RMII && 423 phy_mode != PHY_INTERFACE_MODE_MII) { 424 netdev_err(dev, "invalid phy interface mode\n"); 425 return -EINVAL; 426 } 427 428 phydev = phy_connect(dev, dev_name(&phydev->dev), &bfin_mac_adjust_link, 429 0, phy_mode); 430 431 if (IS_ERR(phydev)) { 432 netdev_err(dev, "could not attach PHY\n"); 433 return PTR_ERR(phydev); 434 } 435 436 /* mask with MAC supported features */ 437 phydev->supported &= (SUPPORTED_10baseT_Half 438 | SUPPORTED_10baseT_Full 439 | SUPPORTED_100baseT_Half 440 | SUPPORTED_100baseT_Full 441 | SUPPORTED_Autoneg 442 | SUPPORTED_Pause | SUPPORTED_Asym_Pause 443 | SUPPORTED_MII 444 | SUPPORTED_TP); 445 446 phydev->advertising = phydev->supported; 447 448 lp->old_link = 0; 449 lp->old_speed = 0; 450 lp->old_duplex = -1; 451 lp->phydev = phydev; 452 453 pr_info("attached PHY driver [%s] " 454 "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n", 455 phydev->drv->name, dev_name(&phydev->dev), phydev->irq, 456 MDC_CLK, mdc_div, sclk/1000000); 457 458 return 0; 459} 460 461/* 462 * Ethtool support 463 */ 464 465/* 466 * interrupt routine for magic packet wakeup 467 */ 468static irqreturn_t bfin_mac_wake_interrupt(int irq, void *dev_id) 469{ 470 return IRQ_HANDLED; 471} 472 473static int 474bfin_mac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) 475{ 476 struct bfin_mac_local *lp = netdev_priv(dev); 477 478 if (lp->phydev) 479 return phy_ethtool_gset(lp->phydev, cmd); 480 481 return -EINVAL; 482} 483 484static int 485bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) 486{ 487 struct bfin_mac_local *lp = netdev_priv(dev); 488 489 if (!capable(CAP_NET_ADMIN)) 490 return -EPERM; 491 492 if (lp->phydev) 493 return phy_ethtool_sset(lp->phydev, cmd); 494 495 return -EINVAL; 496} 497 498static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev, 
499 struct ethtool_drvinfo *info) 500{ 501 strcpy(info->driver, KBUILD_MODNAME); 502 strcpy(info->version, DRV_VERSION); 503 strcpy(info->fw_version, "N/A"); 504 strcpy(info->bus_info, dev_name(&dev->dev)); 505} 506 507static void bfin_mac_ethtool_getwol(struct net_device *dev, 508 struct ethtool_wolinfo *wolinfo) 509{ 510 struct bfin_mac_local *lp = netdev_priv(dev); 511 512 wolinfo->supported = WAKE_MAGIC; 513 wolinfo->wolopts = lp->wol; 514} 515 516static int bfin_mac_ethtool_setwol(struct net_device *dev, 517 struct ethtool_wolinfo *wolinfo) 518{ 519 struct bfin_mac_local *lp = netdev_priv(dev); 520 int rc; 521 522 if (wolinfo->wolopts & (WAKE_MAGICSECURE | 523 WAKE_UCAST | 524 WAKE_MCAST | 525 WAKE_BCAST | 526 WAKE_ARP)) 527 return -EOPNOTSUPP; 528 529 lp->wol = wolinfo->wolopts; 530 531 if (lp->wol && !lp->irq_wake_requested) { 532 /* register wake irq handler */ 533 rc = request_irq(IRQ_MAC_WAKEDET, bfin_mac_wake_interrupt, 534 IRQF_DISABLED, "EMAC_WAKE", dev); 535 if (rc) 536 return rc; 537 lp->irq_wake_requested = true; 538 } 539 540 if (!lp->wol && lp->irq_wake_requested) { 541 free_irq(IRQ_MAC_WAKEDET, dev); 542 lp->irq_wake_requested = false; 543 } 544 545 /* Make sure the PHY driver doesn't suspend */ 546 device_init_wakeup(&dev->dev, lp->wol); 547 548 return 0; 549} 550 551static const struct ethtool_ops bfin_mac_ethtool_ops = { 552 .get_settings = bfin_mac_ethtool_getsettings, 553 .set_settings = bfin_mac_ethtool_setsettings, 554 .get_link = ethtool_op_get_link, 555 .get_drvinfo = bfin_mac_ethtool_getdrvinfo, 556 .get_wol = bfin_mac_ethtool_getwol, 557 .set_wol = bfin_mac_ethtool_setwol, 558}; 559 560/**************************************************************************/ 561static void setup_system_regs(struct net_device *dev) 562{ 563 struct bfin_mac_local *lp = netdev_priv(dev); 564 int i; 565 unsigned short sysctl; 566 567 /* 568 * Odd word alignment for Receive Frame DMA word 569 * Configure checksum support and rcve frame word alignment 570 
*/ 571 sysctl = bfin_read_EMAC_SYSCTL(); 572 /* 573 * check if interrupt is requested for any PHY, 574 * enable PHY interrupt only if needed 575 */ 576 for (i = 0; i < PHY_MAX_ADDR; ++i) 577 if (lp->mii_bus->irq[i] != PHY_POLL) 578 break; 579 if (i < PHY_MAX_ADDR) 580 sysctl |= PHYIE; 581 sysctl |= RXDWA; 582#if defined(BFIN_MAC_CSUM_OFFLOAD) 583 sysctl |= RXCKS; 584#else 585 sysctl &= ~RXCKS; 586#endif 587 bfin_write_EMAC_SYSCTL(sysctl); 588 589 bfin_write_EMAC_MMC_CTL(RSTC | CROLL); 590 591 /* Set vlan regs to let 1522 bytes long packets pass through */ 592 bfin_write_EMAC_VLAN1(lp->vlan1_mask); 593 bfin_write_EMAC_VLAN2(lp->vlan2_mask); 594 595 /* Initialize the TX DMA channel registers */ 596 bfin_write_DMA2_X_COUNT(0); 597 bfin_write_DMA2_X_MODIFY(4); 598 bfin_write_DMA2_Y_COUNT(0); 599 bfin_write_DMA2_Y_MODIFY(0); 600 601 /* Initialize the RX DMA channel registers */ 602 bfin_write_DMA1_X_COUNT(0); 603 bfin_write_DMA1_X_MODIFY(4); 604 bfin_write_DMA1_Y_COUNT(0); 605 bfin_write_DMA1_Y_MODIFY(0); 606} 607 608static void setup_mac_addr(u8 *mac_addr) 609{ 610 u32 addr_low = le32_to_cpu(*(__le32 *) & mac_addr[0]); 611 u16 addr_hi = le16_to_cpu(*(__le16 *) & mac_addr[4]); 612 613 /* this depends on a little-endian machine */ 614 bfin_write_EMAC_ADDRLO(addr_low); 615 bfin_write_EMAC_ADDRHI(addr_hi); 616} 617 618static int bfin_mac_set_mac_address(struct net_device *dev, void *p) 619{ 620 struct sockaddr *addr = p; 621 if (netif_running(dev)) 622 return -EBUSY; 623 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 624 dev->addr_assign_type &= ~NET_ADDR_RANDOM; 625 setup_mac_addr(dev->dev_addr); 626 return 0; 627} 628 629#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP 630#define bfin_mac_hwtstamp_is_none(cfg) ((cfg) == HWTSTAMP_FILTER_NONE) 631 632static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev, 633 struct ifreq *ifr, int cmd) 634{ 635 struct hwtstamp_config config; 636 struct bfin_mac_local *lp = netdev_priv(netdev); 637 u16 ptpctl; 638 u32 ptpfv1, ptpfv2, ptpfv3, 
ptpfoff; 639 640 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 641 return -EFAULT; 642 643 pr_debug("%s config flag:0x%x, tx_type:0x%x, rx_filter:0x%x\n", 644 __func__, config.flags, config.tx_type, config.rx_filter); 645 646 /* reserved for future extensions */ 647 if (config.flags) 648 return -EINVAL; 649 650 if ((config.tx_type != HWTSTAMP_TX_OFF) && 651 (config.tx_type != HWTSTAMP_TX_ON)) 652 return -ERANGE; 653 654 ptpctl = bfin_read_EMAC_PTP_CTL(); 655 656 switch (config.rx_filter) { 657 case HWTSTAMP_FILTER_NONE: 658 /* 659 * Dont allow any timestamping 660 */ 661 ptpfv3 = 0xFFFFFFFF; 662 bfin_write_EMAC_PTP_FV3(ptpfv3); 663 break; 664 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 665 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 666 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 667 /* 668 * Clear the five comparison mask bits (bits[12:8]) in EMAC_PTP_CTL) 669 * to enable all the field matches. 670 */ 671 ptpctl &= ~0x1F00; 672 bfin_write_EMAC_PTP_CTL(ptpctl); 673 /* 674 * Keep the default values of the EMAC_PTP_FOFF register. 675 */ 676 ptpfoff = 0x4A24170C; 677 bfin_write_EMAC_PTP_FOFF(ptpfoff); 678 /* 679 * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2 680 * registers. 681 */ 682 ptpfv1 = 0x11040800; 683 bfin_write_EMAC_PTP_FV1(ptpfv1); 684 ptpfv2 = 0x0140013F; 685 bfin_write_EMAC_PTP_FV2(ptpfv2); 686 /* 687 * The default value (0xFFFC) allows the timestamping of both 688 * received Sync messages and Delay_Req messages. 689 */ 690 ptpfv3 = 0xFFFFFFFC; 691 bfin_write_EMAC_PTP_FV3(ptpfv3); 692 693 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 694 break; 695 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 696 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 697 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 698 /* Clear all five comparison mask bits (bits[12:8]) in the 699 * EMAC_PTP_CTL register to enable all the field matches. 
700 */ 701 ptpctl &= ~0x1F00; 702 bfin_write_EMAC_PTP_CTL(ptpctl); 703 /* 704 * Keep the default values of the EMAC_PTP_FOFF register, except set 705 * the PTPCOF field to 0x2A. 706 */ 707 ptpfoff = 0x2A24170C; 708 bfin_write_EMAC_PTP_FOFF(ptpfoff); 709 /* 710 * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2 711 * registers. 712 */ 713 ptpfv1 = 0x11040800; 714 bfin_write_EMAC_PTP_FV1(ptpfv1); 715 ptpfv2 = 0x0140013F; 716 bfin_write_EMAC_PTP_FV2(ptpfv2); 717 /* 718 * To allow the timestamping of Pdelay_Req and Pdelay_Resp, set 719 * the value to 0xFFF0. 720 */ 721 ptpfv3 = 0xFFFFFFF0; 722 bfin_write_EMAC_PTP_FV3(ptpfv3); 723 724 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; 725 break; 726 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 727 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 728 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 729 /* 730 * Clear bits 8 and 12 of the EMAC_PTP_CTL register to enable only the 731 * EFTM and PTPCM field comparison. 732 */ 733 ptpctl &= ~0x1100; 734 bfin_write_EMAC_PTP_CTL(ptpctl); 735 /* 736 * Keep the default values of all the fields of the EMAC_PTP_FOFF 737 * register, except set the PTPCOF field to 0x0E. 738 */ 739 ptpfoff = 0x0E24170C; 740 bfin_write_EMAC_PTP_FOFF(ptpfoff); 741 /* 742 * Program bits [15:0] of the EMAC_PTP_FV1 register to 0x88F7, which 743 * corresponds to PTP messages on the MAC layer. 744 */ 745 ptpfv1 = 0x110488F7; 746 bfin_write_EMAC_PTP_FV1(ptpfv1); 747 ptpfv2 = 0x0140013F; 748 bfin_write_EMAC_PTP_FV2(ptpfv2); 749 /* 750 * To allow the timestamping of Pdelay_Req and Pdelay_Resp 751 * messages, set the value to 0xFFF0. 
752 */ 753 ptpfv3 = 0xFFFFFFF0; 754 bfin_write_EMAC_PTP_FV3(ptpfv3); 755 756 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; 757 break; 758 default: 759 return -ERANGE; 760 } 761 762 if (config.tx_type == HWTSTAMP_TX_OFF && 763 bfin_mac_hwtstamp_is_none(config.rx_filter)) { 764 ptpctl &= ~PTP_EN; 765 bfin_write_EMAC_PTP_CTL(ptpctl); 766 767 SSYNC(); 768 } else { 769 ptpctl |= PTP_EN; 770 bfin_write_EMAC_PTP_CTL(ptpctl); 771 772 /* 773 * clear any existing timestamp 774 */ 775 bfin_read_EMAC_PTP_RXSNAPLO(); 776 bfin_read_EMAC_PTP_RXSNAPHI(); 777 778 bfin_read_EMAC_PTP_TXSNAPLO(); 779 bfin_read_EMAC_PTP_TXSNAPHI(); 780 781 /* 782 * Set registers so that rollover occurs soon to test this. 783 */ 784 bfin_write_EMAC_PTP_TIMELO(0x00000000); 785 bfin_write_EMAC_PTP_TIMEHI(0xFF800000); 786 787 SSYNC(); 788 789 lp->compare.last_update = 0; 790 timecounter_init(&lp->clock, 791 &lp->cycles, 792 ktime_to_ns(ktime_get_real())); 793 timecompare_update(&lp->compare, 0); 794 } 795 796 lp->stamp_cfg = config; 797 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
798 -EFAULT : 0; 799} 800 801static void bfin_dump_hwtamp(char *s, ktime_t *hw, ktime_t *ts, struct timecompare *cmp) 802{ 803 ktime_t sys = ktime_get_real(); 804 805 pr_debug("%s %s hardware:%d,%d transform system:%d,%d system:%d,%d, cmp:%lld, %lld\n", 806 __func__, s, hw->tv.sec, hw->tv.nsec, ts->tv.sec, ts->tv.nsec, sys.tv.sec, 807 sys.tv.nsec, cmp->offset, cmp->skew); 808} 809 810static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb) 811{ 812 struct bfin_mac_local *lp = netdev_priv(netdev); 813 814 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { 815 int timeout_cnt = MAX_TIMEOUT_CNT; 816 817 /* When doing time stamping, keep the connection to the socket 818 * a while longer 819 */ 820 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 821 822 /* 823 * The timestamping is done at the EMAC module's MII/RMII interface 824 * when the module sees the Start of Frame of an event message packet. This 825 * interface is the closest possible place to the physical Ethernet transmission 826 * medium, providing the best timing accuracy. 
827 */ 828 while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt)) 829 udelay(1); 830 if (timeout_cnt == 0) 831 netdev_err(netdev, "timestamp the TX packet failed\n"); 832 else { 833 struct skb_shared_hwtstamps shhwtstamps; 834 u64 ns; 835 u64 regval; 836 837 regval = bfin_read_EMAC_PTP_TXSNAPLO(); 838 regval |= (u64)bfin_read_EMAC_PTP_TXSNAPHI() << 32; 839 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 840 ns = timecounter_cyc2time(&lp->clock, 841 regval); 842 timecompare_update(&lp->compare, ns); 843 shhwtstamps.hwtstamp = ns_to_ktime(ns); 844 shhwtstamps.syststamp = 845 timecompare_transform(&lp->compare, ns); 846 skb_tstamp_tx(skb, &shhwtstamps); 847 848 bfin_dump_hwtamp("TX", &shhwtstamps.hwtstamp, &shhwtstamps.syststamp, &lp->compare); 849 } 850 } 851} 852 853static void bfin_rx_hwtstamp(struct net_device *netdev, struct sk_buff *skb) 854{ 855 struct bfin_mac_local *lp = netdev_priv(netdev); 856 u32 valid; 857 u64 regval, ns; 858 struct skb_shared_hwtstamps *shhwtstamps; 859 860 if (bfin_mac_hwtstamp_is_none(lp->stamp_cfg.rx_filter)) 861 return; 862 863 valid = bfin_read_EMAC_PTP_ISTAT() & RXEL; 864 if (!valid) 865 return; 866 867 shhwtstamps = skb_hwtstamps(skb); 868 869 regval = bfin_read_EMAC_PTP_RXSNAPLO(); 870 regval |= (u64)bfin_read_EMAC_PTP_RXSNAPHI() << 32; 871 ns = timecounter_cyc2time(&lp->clock, regval); 872 timecompare_update(&lp->compare, ns); 873 memset(shhwtstamps, 0, sizeof(*shhwtstamps)); 874 shhwtstamps->hwtstamp = ns_to_ktime(ns); 875 shhwtstamps->syststamp = timecompare_transform(&lp->compare, ns); 876 877 bfin_dump_hwtamp("RX", &shhwtstamps->hwtstamp, &shhwtstamps->syststamp, &lp->compare); 878} 879 880/* 881 * bfin_read_clock - read raw cycle counter (to be used by time counter) 882 */ 883static cycle_t bfin_read_clock(const struct cyclecounter *tc) 884{ 885 u64 stamp; 886 887 stamp = bfin_read_EMAC_PTP_TIMELO(); 888 stamp |= (u64)bfin_read_EMAC_PTP_TIMEHI() << 32ULL; 889 890 return stamp; 891} 892 893#define PTP_CLK 25000000 
894 895static void bfin_mac_hwtstamp_init(struct net_device *netdev) 896{ 897 struct bfin_mac_local *lp = netdev_priv(netdev); 898 u64 append; 899 900 /* Initialize hardware timer */ 901 append = PTP_CLK * (1ULL << 32); 902 do_div(append, get_sclk()); 903 bfin_write_EMAC_PTP_ADDEND((u32)append); 904 905 memset(&lp->cycles, 0, sizeof(lp->cycles)); 906 lp->cycles.read = bfin_read_clock; 907 lp->cycles.mask = CLOCKSOURCE_MASK(64); 908 lp->cycles.mult = 1000000000 / PTP_CLK; 909 lp->cycles.shift = 0; 910 911 /* Synchronize our NIC clock against system wall clock */ 912 memset(&lp->compare, 0, sizeof(lp->compare)); 913 lp->compare.source = &lp->clock; 914 lp->compare.target = ktime_get_real; 915 lp->compare.num_samples = 10; 916 917 /* Initialize hwstamp config */ 918 lp->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE; 919 lp->stamp_cfg.tx_type = HWTSTAMP_TX_OFF; 920} 921 922#else 923# define bfin_mac_hwtstamp_is_none(cfg) 0 924# define bfin_mac_hwtstamp_init(dev) 925# define bfin_mac_hwtstamp_ioctl(dev, ifr, cmd) (-EOPNOTSUPP) 926# define bfin_rx_hwtstamp(dev, skb) 927# define bfin_tx_hwtstamp(dev, skb) 928#endif 929 930static inline void _tx_reclaim_skb(void) 931{ 932 do { 933 tx_list_head->desc_a.config &= ~DMAEN; 934 tx_list_head->status.status_word = 0; 935 if (tx_list_head->skb) { 936 dev_kfree_skb(tx_list_head->skb); 937 tx_list_head->skb = NULL; 938 } 939 tx_list_head = tx_list_head->next; 940 941 } while (tx_list_head->status.status_word != 0); 942} 943 944static void tx_reclaim_skb(struct bfin_mac_local *lp) 945{ 946 int timeout_cnt = MAX_TIMEOUT_CNT; 947 948 if (tx_list_head->status.status_word != 0) 949 _tx_reclaim_skb(); 950 951 if (current_tx_ptr->next == tx_list_head) { 952 while (tx_list_head->status.status_word == 0) { 953 /* slow down polling to avoid too many queue stop. */ 954 udelay(10); 955 /* reclaim skb if DMA is not running. 
*/ 956 if (!(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)) 957 break; 958 if (timeout_cnt-- < 0) 959 break; 960 } 961 962 if (timeout_cnt >= 0) 963 _tx_reclaim_skb(); 964 else 965 netif_stop_queue(lp->ndev); 966 } 967 968 if (current_tx_ptr->next != tx_list_head && 969 netif_queue_stopped(lp->ndev)) 970 netif_wake_queue(lp->ndev); 971 972 if (tx_list_head != current_tx_ptr) { 973 /* shorten the timer interval if tx queue is stopped */ 974 if (netif_queue_stopped(lp->ndev)) 975 lp->tx_reclaim_timer.expires = 976 jiffies + (TX_RECLAIM_JIFFIES >> 4); 977 else 978 lp->tx_reclaim_timer.expires = 979 jiffies + TX_RECLAIM_JIFFIES; 980 981 mod_timer(&lp->tx_reclaim_timer, 982 lp->tx_reclaim_timer.expires); 983 } 984 985 return; 986} 987 988static void tx_reclaim_skb_timeout(unsigned long lp) 989{ 990 tx_reclaim_skb((struct bfin_mac_local *)lp); 991} 992 993static int bfin_mac_hard_start_xmit(struct sk_buff *skb, 994 struct net_device *dev) 995{ 996 struct bfin_mac_local *lp = netdev_priv(dev); 997 u16 *data; 998 u32 data_align = (unsigned long)(skb->data) & 0x3; 999 1000 current_tx_ptr->skb = skb; 1001 1002 if (data_align == 0x2) { 1003 /* move skb->data to current_tx_ptr payload */ 1004 data = (u16 *)(skb->data) - 1; 1005 *data = (u16)(skb->len); 1006 /* 1007 * When transmitting an Ethernet packet, the PTP_TSYNC module requires 1008 * a DMA_Length_Word field associated with the packet. The lower 12 bits 1009 * of this field are the length of the packet payload in bytes and the higher 1010 * 4 bits are the timestamping enable field. 1011 */ 1012 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) 1013 *data |= 0x1000; 1014 1015 current_tx_ptr->desc_a.start_addr = (u32)data; 1016 /* this is important! 
*/ 1017 blackfin_dcache_flush_range((u32)data, 1018 (u32)((u8 *)data + skb->len + 4)); 1019 } else { 1020 *((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len); 1021 /* enable timestamping for the sent packet */ 1022 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) 1023 *((u16 *)(current_tx_ptr->packet)) |= 0x1000; 1024 memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data, 1025 skb->len); 1026 current_tx_ptr->desc_a.start_addr = 1027 (u32)current_tx_ptr->packet; 1028 blackfin_dcache_flush_range( 1029 (u32)current_tx_ptr->packet, 1030 (u32)(current_tx_ptr->packet + skb->len + 2)); 1031 } 1032 1033 /* make sure the internal data buffers in the core are drained 1034 * so that the DMA descriptors are completely written when the 1035 * DMA engine goes to fetch them below 1036 */ 1037 SSYNC(); 1038 1039 /* always clear status buffer before start tx dma */ 1040 current_tx_ptr->status.status_word = 0; 1041 1042 /* enable this packet's dma */ 1043 current_tx_ptr->desc_a.config |= DMAEN; 1044 1045 /* tx dma is running, just return */ 1046 if (bfin_read_DMA2_IRQ_STATUS() & DMA_RUN) 1047 goto out; 1048 1049 /* tx dma is not running */ 1050 bfin_write_DMA2_NEXT_DESC_PTR(&(current_tx_ptr->desc_a)); 1051 /* dma enabled, read from memory, size is 6 */ 1052 bfin_write_DMA2_CONFIG(current_tx_ptr->desc_a.config); 1053 /* Turn on the EMAC tx */ 1054 bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE); 1055 1056out: 1057 bfin_tx_hwtstamp(dev, skb); 1058 1059 current_tx_ptr = current_tx_ptr->next; 1060 dev->stats.tx_packets++; 1061 dev->stats.tx_bytes += (skb->len); 1062 1063 tx_reclaim_skb(lp); 1064 1065 return NETDEV_TX_OK; 1066} 1067 1068#define IP_HEADER_OFF 0 1069#define RX_ERROR_MASK (RX_LONG | RX_ALIGN | RX_CRC | RX_LEN | \ 1070 RX_FRAG | RX_ADDR | RX_DMAO | RX_PHY | RX_LATE | RX_RANGE) 1071 1072static void bfin_mac_rx(struct net_device *dev) 1073{ 1074 struct sk_buff *skb, *new_skb; 1075 unsigned short len; 1076 struct bfin_mac_local *lp __maybe_unused = netdev_priv(dev); 
#if defined(BFIN_MAC_CSUM_OFFLOAD)
	unsigned int i;
	unsigned char fcs[ETH_FCS_LEN + 1];
#endif

	/* check if frame status word reports an error condition
	 * in which case we simply drop the packet
	 */
	if (current_rx_ptr->status.status_word & RX_ERROR_MASK) {
		netdev_notice(dev, "rx: receive error - packet dropped\n");
		dev->stats.rx_dropped++;
		goto out;
	}

	/* allocate a new skb for next time receive */
	skb = current_rx_ptr->skb;

	new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
	if (!new_skb) {
		/* keep the old skb in the descriptor so RX can continue */
		netdev_notice(dev, "rx: low on mem - packet dropped\n");
		dev->stats.rx_dropped++;
		goto out;
	}
	/* reserve 2 bytes for RXDWA padding */
	skb_reserve(new_skb, NET_IP_ALIGN);
	/* Invalidate the data cache of skb->data range when it is a write-back
	 * cache. It will prevent overwriting of the new data from DMA.
	 */
	blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
					 (unsigned long)new_skb->end);

	/* hand the fresh buffer to the DMA descriptor; the -2 backs up over
	 * the RXDWA padding reserved above (assumes NET_IP_ALIGN == 2 —
	 * matches the skb_reserve comment; TODO confirm)
	 */
	current_rx_ptr->skb = new_skb;
	current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;

	len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN);
	/* Deduce Ethernet FCS length from Ethernet payload length */
	len -= ETH_FCS_LEN;
	skb_put(skb, len);

	skb->protocol = eth_type_trans(skb, dev);

	bfin_rx_hwtstamp(dev, skb);

#if defined(BFIN_MAC_CSUM_OFFLOAD)
	/* Checksum offloading only works for IPv4 packets with the standard IP header
	 * length of 20 bytes, because the blackfin MAC checksum calculation is
	 * based on that assumption. We must NOT use the calculated checksum if our
	 * IP version or header break that assumption.
	 */
	if (skb->data[IP_HEADER_OFF] == 0x45) {
		skb->csum = current_rx_ptr->status.ip_payload_csum;
		/*
		 * Deduce Ethernet FCS from hardware generated IP payload checksum.
		 * IP checksum is based on 16-bit one's complement algorithm.
		 * To deduce a value from checksum is equal to add its inversion.
		 * If the IP payload len is odd, the inversed FCS should also
		 * begin from odd address and leave first byte zero.
		 */
		if (skb->len % 2) {
			fcs[0] = 0;
			for (i = 0; i < ETH_FCS_LEN; i++)
				fcs[i + 1] = ~skb->data[skb->len + i];
			skb->csum = csum_partial(fcs, ETH_FCS_LEN + 1, skb->csum);
		} else {
			for (i = 0; i < ETH_FCS_LEN; i++)
				fcs[i] = ~skb->data[skb->len + i];
			skb->csum = csum_partial(fcs, ETH_FCS_LEN, skb->csum);
		}
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
#endif

	netif_rx(skb);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
out:
	/* clear the status word so the IRQ handler sees this slot as empty,
	 * then advance to the next descriptor in the ring
	 */
	current_rx_ptr->status.status_word = 0x00000000;
	current_rx_ptr = current_rx_ptr->next;
}

/* interrupt routine to handle rx and error signal
 *
 * Drains the RX ring: calls bfin_mac_rx() for every descriptor whose
 * status word is non-zero.  When the current slot is empty on entry,
 * it peeks at the next slot once (the DMA may already have advanced)
 * before acking DMA_DONE | DMA_ERR and returning.
 */
static irqreturn_t bfin_mac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	int number = 0;	/* packets consumed in this invocation */

get_one_packet:
	if (current_rx_ptr->status.status_word == 0) {
		/* no more new packet received */
		if (number == 0) {
			if (current_rx_ptr->next->status.status_word != 0) {
				current_rx_ptr = current_rx_ptr->next;
				goto real_rx;
			}
		}
		/* ack the DMA channel 1 (RX) interrupt status bits */
		bfin_write_DMA1_IRQ_STATUS(bfin_read_DMA1_IRQ_STATUS() |
					   DMA_DONE | DMA_ERR);
		return IRQ_HANDLED;
	}

real_rx:
	bfin_mac_rx(dev);
	number++;
	goto get_one_packet;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run the RX handler and reclaim completed TX buffers
 * with the RX interrupt masked (used by netconsole and friends).
 */
static void bfin_mac_poll(struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);

	disable_irq(IRQ_MAC_RX);
	bfin_mac_interrupt(IRQ_MAC_RX, dev);
	tx_reclaim_skb(lp);
	enable_irq(IRQ_MAC_RX);
}
#endif				/* CONFIG_NET_POLL_CONTROLLER */

/* Disable the EMAC by clearing both the receive (RE) and transmit (TE)
 * enable bits in EMAC_OPMODE, leaving the other mode bits untouched.
 */
static void bfin_mac_disable(void)
{
	unsigned int opmode;

	opmode = bfin_read_EMAC_OPMODE();
	opmode &= (~RE);
	opmode &= (~TE);
	/* Turn off the EMAC */
	bfin_write_EMAC_OPMODE(opmode);
}

/*
 * Enable Interrupts, Receive, and Transmit
 *
 * Returns 0 on success or the (negative) error from bfin_mdio_poll().
 */
static int bfin_mac_enable(struct phy_device *phydev)
{
	int ret;
	u32 opmode;

	pr_debug("%s\n", __func__);

	/* Set RX DMA: point channel 1 at the head of the RX descriptor ring */
	bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a));
	bfin_write_DMA1_CONFIG(rx_list_head->desc_a.config);

	/* Wait MII done */
	ret = bfin_mdio_poll();
	if (ret)
		return ret;

	/* We enable only RX here */
	/* ASTP : Enable Automatic Pad Stripping
	   PR : Promiscuous Mode for test
	   PSF : Receive frames with total length less than 64 bytes.
	   FDMODE : Full Duplex Mode
	   LB : Internal Loopback for test
	   RE : Receiver Enable */
	opmode = bfin_read_EMAC_OPMODE();
	if (opmode & FDMODE)
		opmode |= PSF;
	else
		opmode |= DRO | DC | PSF;
	opmode |= RE;

	if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
		opmode |= RMII; /* For Now only 100MBit are supported */
#if defined(CONFIG_BF537) || defined(CONFIG_BF536)
		if (__SILICON_REVISION__ < 3) {
			/*
			 * This isn't publicly documented (fun times!), but in
			 * silicon <=0.2, the RX and TX pins are clocked together.
			 * So in order to recv, we must enable the transmit side
			 * as well. This will cause a spurious TX interrupt too,
			 * but we can easily consume that.
			 */
			opmode |= TE;
		}
#endif
	}

	/* Turn on the EMAC rx */
	bfin_write_EMAC_OPMODE(opmode);

	return 0;
}

/* Our watchdog timed out.
   Called by the networking layer */
static void bfin_mac_timeout(struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);

	pr_debug("%s: %s\n", dev->name, __func__);

	bfin_mac_disable();

	del_timer(&lp->tx_reclaim_timer);

	/* reset tx queue and free skb: walk the pending part of the TX ring
	 * (head up to the producer pointer), disabling each descriptor and
	 * dropping its queued skb
	 */
	while (tx_list_head != current_tx_ptr) {
		tx_list_head->desc_a.config &= ~DMAEN;
		tx_list_head->status.status_word = 0;
		if (tx_list_head->skb) {
			dev_kfree_skb(tx_list_head->skb);
			tx_list_head->skb = NULL;
		}
		tx_list_head = tx_list_head->next;
	}

	if (netif_queue_stopped(lp->ndev))
		netif_wake_queue(lp->ndev);

	bfin_mac_enable(lp->phydev);

	/* We can accept TX packets again */
	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue(dev);
}

/* Program the EMAC 64-bit multicast hash filter from the device's
 * multicast list (upper 6 bits of the Ethernet CRC select the bit).
 */
static void bfin_mac_multicast_hash(struct net_device *dev)
{
	u32 emac_hashhi, emac_hashlo;
	struct netdev_hw_addr *ha;
	u32 crc;

	emac_hashhi = emac_hashlo = 0;

	netdev_for_each_mc_addr(ha, dev) {
		crc = ether_crc(ETH_ALEN, ha->addr);
		crc >>= 26;

		/* bit 5 of the 6-bit hash picks the HI/LO register,
		 * bits 0-4 pick the bit within it
		 */
		if (crc & 0x20)
			emac_hashhi |= 1 << (crc & 0x1f);
		else
			emac_hashlo |= 1 << (crc & 0x1f);
	}

	bfin_write_EMAC_HASHHI(emac_hashhi);
	bfin_write_EMAC_HASHLO(emac_hashlo);
}

/*
 * This routine will, depending on the values passed to it,
 * either make it accept multicast packets, go into
 * promiscuous mode (for TCPDUMP and cousins) or accept
 * a select set of multicast packets
 */
static void bfin_mac_set_multicast_list(struct net_device *dev)
{
	u32 sysctl;

	if (dev->flags & IFF_PROMISC) {
		netdev_info(dev, "set promisc mode\n");
		sysctl = bfin_read_EMAC_OPMODE();
		sysctl |= PR;
		bfin_write_EMAC_OPMODE(sysctl);
	} else if (dev->flags & IFF_ALLMULTI) {
		/* accept all multicast */
		sysctl = bfin_read_EMAC_OPMODE();
		sysctl |= PAM;
		bfin_write_EMAC_OPMODE(sysctl);
	} else if (!netdev_mc_empty(dev)) {
		/* set up multicast hash table */
		sysctl = bfin_read_EMAC_OPMODE();
		sysctl |= HM;
		bfin_write_EMAC_OPMODE(sysctl);
		bfin_mac_multicast_hash(dev);
	} else {
		/* clear promisc or multicast mode
		 * NOTE(review): this clears RAF | PAM but not PR or HM set in
		 * the branches above — confirm whether that is intended
		 */
		sysctl = bfin_read_EMAC_OPMODE();
		sysctl &= ~(RAF | PAM);
		bfin_write_EMAC_OPMODE(sysctl);
	}
}

/* ioctl dispatcher: SIOCSHWTSTAMP goes to the hardware timestamping
 * handler, everything else is forwarded to the attached PHY (if any).
 */
static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct bfin_mac_local *lp = netdev_priv(netdev);

	if (!netif_running(netdev))
		return -EINVAL;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return bfin_mac_hwtstamp_ioctl(netdev, ifr, cmd);
	default:
		if (lp->phydev)
			return phy_mii_ioctl(lp->phydev, ifr, cmd);
		else
			return -EOPNOTSUPP;
	}
}

/*
 * this puts the device in an inactive state
 */
static void bfin_mac_shutdown(struct net_device *dev)
{
	/* Turn off the EMAC */
	bfin_write_EMAC_OPMODE(0x00000000);
	/* Turn off the EMAC RX DMA */
	bfin_write_DMA1_CONFIG(0x0000);
	bfin_write_DMA2_CONFIG(0x0000);
}

/*
 * Open and Initialize the interface
 *
 * Set up everything, reset the card, etc..
 */
static int bfin_mac_open(struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);
	int ret;
	pr_debug("%s: %s\n", dev->name, __func__);

	/*
	 * Check that the address is valid. If its not, refuse
	 * to bring the device up.
The user must specify an
	 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
	 */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		netdev_warn(dev, "no valid ethernet hw addr\n");
		return -EINVAL;
	}

	/* initial rx and tx list */
	ret = desc_list_init(dev);
	if (ret)
		return ret;

	/* bring the PHY out of power-down/reset before touching the MAC */
	phy_start(lp->phydev);
	phy_write(lp->phydev, MII_BMCR, BMCR_RESET);
	setup_system_regs(dev);
	setup_mac_addr(dev->dev_addr);

	bfin_mac_disable();
	ret = bfin_mac_enable(lp->phydev);
	if (ret)
		return ret;
	/* NOTE(review): the descriptor lists from desc_list_init() are not
	 * freed on this error path — confirm whether that leak is intended
	 */
	pr_debug("hardware init finished\n");

	netif_start_queue(dev);
	netif_carrier_on(dev);

	return 0;
}

/*
 * this makes the board clean up everything that it can
 * and not talk to the outside world. Caused by
 * an 'ifconfig ethX down'
 */
static int bfin_mac_close(struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);
	pr_debug("%s: %s\n", dev->name, __func__);

	netif_stop_queue(dev);
	netif_carrier_off(dev);

	/* stop the PHY state machine and power the PHY down */
	phy_stop(lp->phydev);
	phy_write(lp->phydev, MII_BMCR, BMCR_PDOWN);

	/* clear everything */
	bfin_mac_shutdown(dev);

	/* free the rx/tx buffers */
	desc_list_free();

	return 0;
}

/* net_device callbacks for the Blackfin on-chip MAC */
static const struct net_device_ops bfin_mac_netdev_ops = {
	.ndo_open		= bfin_mac_open,
	.ndo_stop		= bfin_mac_close,
	.ndo_start_xmit		= bfin_mac_hard_start_xmit,
	.ndo_set_mac_address	= bfin_mac_set_mac_address,
	.ndo_tx_timeout		= bfin_mac_timeout,
	.ndo_set_rx_mode	= bfin_mac_set_multicast_list,
	.ndo_do_ioctl		= bfin_mac_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bfin_mac_poll,
#endif
};

static int __devinit bfin_mac_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct
bfin_mac_local *lp;
	struct platform_device *pd;
	struct bfin_mii_bus_platform_data *mii_bus_data;
	int rc;

	ndev = alloc_etherdev(sizeof(struct bfin_mac_local));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);
	platform_set_drvdata(pdev, ndev);
	lp = netdev_priv(ndev);
	lp->ndev = ndev;

	/* Grab the MAC address in the MAC */
	*(__le32 *) (&(ndev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO());
	*(__le16 *) (&(ndev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI());

	/* probe mac: write a pattern to ADDRLO and read it back to verify
	 * the EMAC block is actually present
	 */
	/*todo: how to proble? which is revision_register */
	bfin_write_EMAC_ADDRLO(0x12345678);
	if (bfin_read_EMAC_ADDRLO() != 0x12345678) {
		dev_err(&pdev->dev, "Cannot detect Blackfin on-chip ethernet MAC controller!\n");
		rc = -ENODEV;
		goto out_err_probe_mac;
	}


	/*
	 * Is it valid? (Did bootloader initialize it?)
	 * Grab the MAC from the board somehow
	 * this is done in the arch/blackfin/mach-bfxxx/boards/eth_mac.c
	 */
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		if (bfin_get_ether_addr(ndev->dev_addr) ||
		     !is_valid_ether_addr(ndev->dev_addr)) {
			/* Still not valid, get a random one */
			netdev_warn(ndev, "Setting Ethernet MAC to a random one\n");
			eth_hw_addr_random(ndev);
		}
	}

	setup_mac_addr(ndev->dev_addr);

	/* the MII bus is a separate platform device passed via platform_data;
	 * its drvdata (set in bfin_mii_bus_probe) is the registered mii_bus
	 */
	if (!pdev->dev.platform_data) {
		dev_err(&pdev->dev, "Cannot get platform device bfin_mii_bus!\n");
		rc = -ENODEV;
		goto out_err_probe_mac;
	}
	pd = pdev->dev.platform_data;
	lp->mii_bus = platform_get_drvdata(pd);
	if (!lp->mii_bus) {
		dev_err(&pdev->dev, "Cannot get mii_bus!\n");
		rc = -ENODEV;
		goto out_err_probe_mac;
	}
	lp->mii_bus->priv = ndev;
	mii_bus_data = pd->dev.platform_data;

	rc = mii_probe(ndev, mii_bus_data->phy_mode);
	if (rc) {
		dev_err(&pdev->dev, "MII Probe failed!\n");
		goto out_err_mii_probe;
	}

	lp->vlan1_mask = ETH_P_8021Q | mii_bus_data->vlan1_mask;
	lp->vlan2_mask = ETH_P_8021Q | mii_bus_data->vlan2_mask;

	/* Fill in the fields of the device structure with ethernet values. */
	ether_setup(ndev);

	ndev->netdev_ops = &bfin_mac_netdev_ops;
	ndev->ethtool_ops = &bfin_mac_ethtool_ops;

	init_timer(&lp->tx_reclaim_timer);
	lp->tx_reclaim_timer.data = (unsigned long)lp;
	lp->tx_reclaim_timer.function = tx_reclaim_skb_timeout;

	spin_lock_init(&lp->lock);

	/* now, enable interrupts */
	/* register irq handler */
	rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt,
			IRQF_DISABLED, "EMAC_RX", ndev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n");
		/* NOTE(review): the real request_irq() error is replaced by
		 * -EBUSY here — consider propagating rc unchanged
		 */
		rc = -EBUSY;
		goto out_err_request_irq;
	}

	rc = register_netdev(ndev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device!\n");
		goto out_err_reg_ndev;
	}

	bfin_mac_hwtstamp_init(ndev);

	/* now, print out the card info, in a short format.. */
	netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);

	return 0;

out_err_reg_ndev:
	free_irq(IRQ_MAC_RX, ndev);
out_err_request_irq:
out_err_mii_probe:
	/* NOTE(review): this bus was obtained from the bfin_mii_bus device
	 * above, and bfin_mii_bus_remove() also unregisters/frees it —
	 * verify ownership to avoid a potential double free
	 */
	mdiobus_unregister(lp->mii_bus);
	mdiobus_free(lp->mii_bus);
out_err_probe_mac:
	platform_set_drvdata(pdev, NULL);
	free_netdev(ndev);

	return rc;
}

static int __devexit bfin_mac_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct bfin_mac_local *lp = netdev_priv(ndev);

	platform_set_drvdata(pdev, NULL);

	/* detach the shared MII bus from this netdev */
	lp->mii_bus->priv = NULL;

	unregister_netdev(ndev);

	free_irq(IRQ_MAC_RX, ndev);

	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_PM
/* Suspend: if Wake-on-LAN is armed, keep the receiver running with
 * magic-packet wakeup enabled; otherwise just close the interface.
 */
static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	struct net_device *net_dev = platform_get_drvdata(pdev);
	struct bfin_mac_local *lp = netdev_priv(net_dev);

	if (lp->wol) {
		/* disable TX, keep RX alive for wakeup detection */
		bfin_write_EMAC_OPMODE((bfin_read_EMAC_OPMODE() & ~TE) | RE);
		bfin_write_EMAC_WKUP_CTL(MPKE);
		enable_irq_wake(IRQ_MAC_WAKEDET);
	} else {
		if (netif_running(net_dev))
			bfin_mac_close(net_dev);
	}

	return 0;
}

/* Resume: undo the WoL arming done in suspend, or reopen the interface. */
static int bfin_mac_resume(struct platform_device *pdev)
{
	struct net_device *net_dev = platform_get_drvdata(pdev);
	struct bfin_mac_local *lp = netdev_priv(net_dev);

	if (lp->wol) {
		bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);
		bfin_write_EMAC_WKUP_CTL(0);
		disable_irq_wake(IRQ_MAC_WAKEDET);
	} else {
		if (netif_running(net_dev))
			bfin_mac_open(net_dev);
	}

	return 0;
}
#else
#define bfin_mac_suspend NULL
#define bfin_mac_resume NULL
#endif	/* CONFIG_PM */

static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
{
	struct mii_bus *miibus;
	struct
bfin_mii_bus_platform_data *mii_bus_pd; 1642 const unsigned short *pin_req; 1643 int rc, i; 1644 1645 mii_bus_pd = dev_get_platdata(&pdev->dev); 1646 if (!mii_bus_pd) { 1647 dev_err(&pdev->dev, "No peripherals in platform data!\n"); 1648 return -EINVAL; 1649 } 1650 1651 /* 1652 * We are setting up a network card, 1653 * so set the GPIO pins to Ethernet mode 1654 */ 1655 pin_req = mii_bus_pd->mac_peripherals; 1656 rc = peripheral_request_list(pin_req, KBUILD_MODNAME); 1657 if (rc) { 1658 dev_err(&pdev->dev, "Requesting peripherals failed!\n"); 1659 return rc; 1660 } 1661 1662 rc = -ENOMEM; 1663 miibus = mdiobus_alloc(); 1664 if (miibus == NULL) 1665 goto out_err_alloc; 1666 miibus->read = bfin_mdiobus_read; 1667 miibus->write = bfin_mdiobus_write; 1668 miibus->reset = bfin_mdiobus_reset; 1669 1670 miibus->parent = &pdev->dev; 1671 miibus->name = "bfin_mii_bus"; 1672 miibus->phy_mask = mii_bus_pd->phy_mask; 1673 1674 snprintf(miibus->id, MII_BUS_ID_SIZE, "%s-%x", 1675 pdev->name, pdev->id); 1676 miibus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); 1677 if (!miibus->irq) 1678 goto out_err_irq_alloc; 1679 1680 for (i = rc; i < PHY_MAX_ADDR; ++i) 1681 miibus->irq[i] = PHY_POLL; 1682 1683 rc = clamp(mii_bus_pd->phydev_number, 0, PHY_MAX_ADDR); 1684 if (rc != mii_bus_pd->phydev_number) 1685 dev_err(&pdev->dev, "Invalid number (%i) of phydevs\n", 1686 mii_bus_pd->phydev_number); 1687 for (i = 0; i < rc; ++i) { 1688 unsigned short phyaddr = mii_bus_pd->phydev_data[i].addr; 1689 if (phyaddr < PHY_MAX_ADDR) 1690 miibus->irq[phyaddr] = mii_bus_pd->phydev_data[i].irq; 1691 else 1692 dev_err(&pdev->dev, 1693 "Invalid PHY address %i for phydev %i\n", 1694 phyaddr, i); 1695 } 1696 1697 rc = mdiobus_register(miibus); 1698 if (rc) { 1699 dev_err(&pdev->dev, "Cannot register MDIO bus!\n"); 1700 goto out_err_mdiobus_register; 1701 } 1702 1703 platform_set_drvdata(pdev, miibus); 1704 return 0; 1705 1706out_err_mdiobus_register: 1707 kfree(miibus->irq); 1708out_err_irq_alloc: 
1709 mdiobus_free(miibus); 1710out_err_alloc: 1711 peripheral_free_list(pin_req); 1712 1713 return rc; 1714} 1715 1716static int __devexit bfin_mii_bus_remove(struct platform_device *pdev) 1717{ 1718 struct mii_bus *miibus = platform_get_drvdata(pdev); 1719 struct bfin_mii_bus_platform_data *mii_bus_pd = 1720 dev_get_platdata(&pdev->dev); 1721 1722 platform_set_drvdata(pdev, NULL); 1723 mdiobus_unregister(miibus); 1724 kfree(miibus->irq); 1725 mdiobus_free(miibus); 1726 peripheral_free_list(mii_bus_pd->mac_peripherals); 1727 1728 return 0; 1729} 1730 1731static struct platform_driver bfin_mii_bus_driver = { 1732 .probe = bfin_mii_bus_probe, 1733 .remove = __devexit_p(bfin_mii_bus_remove), 1734 .driver = { 1735 .name = "bfin_mii_bus", 1736 .owner = THIS_MODULE, 1737 }, 1738}; 1739 1740static struct platform_driver bfin_mac_driver = { 1741 .probe = bfin_mac_probe, 1742 .remove = __devexit_p(bfin_mac_remove), 1743 .resume = bfin_mac_resume, 1744 .suspend = bfin_mac_suspend, 1745 .driver = { 1746 .name = KBUILD_MODNAME, 1747 .owner = THIS_MODULE, 1748 }, 1749}; 1750 1751static int __init bfin_mac_init(void) 1752{ 1753 int ret; 1754 ret = platform_driver_register(&bfin_mii_bus_driver); 1755 if (!ret) 1756 return platform_driver_register(&bfin_mac_driver); 1757 return -ENODEV; 1758} 1759 1760module_init(bfin_mac_init); 1761 1762static void __exit bfin_mac_cleanup(void) 1763{ 1764 platform_driver_unregister(&bfin_mac_driver); 1765 platform_driver_unregister(&bfin_mii_bus_driver); 1766} 1767 1768module_exit(bfin_mac_cleanup); 1769 1770