sh_eth.c revision 5e7a76be0e48217aff6b6f34bdcce4725db999e2
1/* 2 * SuperH Ethernet device driver 3 * 4 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu 5 * Copyright (C) 2008-2012 Renesas Solutions Corp. 6 * 7 * This program is free software; you can redistribute it and/or modify it 8 * under the terms and conditions of the GNU General Public License, 9 * version 2, as published by the Free Software Foundation. 10 * 11 * This program is distributed in the hope it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 * more details. 15 * You should have received a copy of the GNU General Public License along with 16 * this program; if not, write to the Free Software Foundation, Inc., 17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * The full GNU General Public License is included in this distribution in 20 * the file called "COPYING". 21 */ 22 23#include <linux/init.h> 24#include <linux/module.h> 25#include <linux/kernel.h> 26#include <linux/spinlock.h> 27#include <linux/interrupt.h> 28#include <linux/dma-mapping.h> 29#include <linux/etherdevice.h> 30#include <linux/delay.h> 31#include <linux/platform_device.h> 32#include <linux/mdio-bitbang.h> 33#include <linux/netdevice.h> 34#include <linux/phy.h> 35#include <linux/cache.h> 36#include <linux/io.h> 37#include <linux/pm_runtime.h> 38#include <linux/slab.h> 39#include <linux/ethtool.h> 40#include <linux/if_vlan.h> 41#include <linux/clk.h> 42#include <linux/sh_eth.h> 43 44#include "sh_eth.h" 45 46#define SH_ETH_DEF_MSG_ENABLE \ 47 (NETIF_MSG_LINK | \ 48 NETIF_MSG_TIMER | \ 49 NETIF_MSG_RX_ERR| \ 50 NETIF_MSG_TX_ERR) 51 52#if defined(CONFIG_CPU_SUBTYPE_SH7734) || \ 53 defined(CONFIG_CPU_SUBTYPE_SH7763) || \ 54 defined(CONFIG_ARCH_R8A7740) 55static void sh_eth_select_mii(struct net_device *ndev) 56{ 57 u32 value = 0x0; 58 struct sh_eth_private *mdp = netdev_priv(ndev); 59 60 switch (mdp->phy_interface) { 61 case 
PHY_INTERFACE_MODE_GMII: 62 value = 0x2; 63 break; 64 case PHY_INTERFACE_MODE_MII: 65 value = 0x1; 66 break; 67 case PHY_INTERFACE_MODE_RMII: 68 value = 0x0; 69 break; 70 default: 71 pr_warn("PHY interface mode was not setup. Set to MII.\n"); 72 value = 0x1; 73 break; 74 } 75 76 sh_eth_write(ndev, value, RMII_MII); 77} 78#endif 79 80/* There is CPU dependent code */ 81#if defined(CONFIG_CPU_SUBTYPE_SH7724) 82#define SH_ETH_RESET_DEFAULT 1 83static void sh_eth_set_duplex(struct net_device *ndev) 84{ 85 struct sh_eth_private *mdp = netdev_priv(ndev); 86 87 if (mdp->duplex) /* Full */ 88 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); 89 else /* Half */ 90 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); 91} 92 93static void sh_eth_set_rate(struct net_device *ndev) 94{ 95 struct sh_eth_private *mdp = netdev_priv(ndev); 96 97 switch (mdp->speed) { 98 case 10: /* 10BASE */ 99 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR); 100 break; 101 case 100:/* 100BASE */ 102 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR); 103 break; 104 default: 105 break; 106 } 107} 108 109/* SH7724 */ 110static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 111 .set_duplex = sh_eth_set_duplex, 112 .set_rate = sh_eth_set_rate, 113 114 .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, 115 .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, 116 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f, 117 118 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, 119 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | 120 EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, 121 .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, 122 123 .apr = 1, 124 .mpr = 1, 125 .tpauser = 1, 126 .hw_swap = 1, 127 .rpadir = 1, 128 .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */ 129}; 130#elif defined(CONFIG_CPU_SUBTYPE_SH7757) 131#define SH_ETH_HAS_BOTH_MODULES 1 132#define SH_ETH_HAS_TSU 1 133static void 
sh_eth_set_duplex(struct net_device *ndev) 134{ 135 struct sh_eth_private *mdp = netdev_priv(ndev); 136 137 if (mdp->duplex) /* Full */ 138 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); 139 else /* Half */ 140 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); 141} 142 143static void sh_eth_set_rate(struct net_device *ndev) 144{ 145 struct sh_eth_private *mdp = netdev_priv(ndev); 146 147 switch (mdp->speed) { 148 case 10: /* 10BASE */ 149 sh_eth_write(ndev, 0, RTRATE); 150 break; 151 case 100:/* 100BASE */ 152 sh_eth_write(ndev, 1, RTRATE); 153 break; 154 default: 155 break; 156 } 157} 158 159/* SH7757 */ 160static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 161 .set_duplex = sh_eth_set_duplex, 162 .set_rate = sh_eth_set_rate, 163 164 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 165 .rmcr_value = 0x00000001, 166 167 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, 168 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | 169 EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, 170 .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, 171 172 .apr = 1, 173 .mpr = 1, 174 .tpauser = 1, 175 .hw_swap = 1, 176 .no_ade = 1, 177 .rpadir = 1, 178 .rpadir_value = 2 << 16, 179}; 180 181#define SH_GIGA_ETH_BASE 0xfee00000 182#define GIGA_MALR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8) 183#define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0) 184static void sh_eth_chip_reset_giga(struct net_device *ndev) 185{ 186 int i; 187 unsigned long mahr[2], malr[2]; 188 189 /* save MAHR and MALR */ 190 for (i = 0; i < 2; i++) { 191 malr[i] = ioread32((void *)GIGA_MALR(i)); 192 mahr[i] = ioread32((void *)GIGA_MAHR(i)); 193 } 194 195 /* reset device */ 196 iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800)); 197 mdelay(1); 198 199 /* restore MAHR and MALR */ 200 for (i = 0; i < 2; i++) { 201 iowrite32(malr[i], (void *)GIGA_MALR(i)); 202 iowrite32(mahr[i], (void *)GIGA_MAHR(i)); 203 } 204} 205 
206static int sh_eth_is_gether(struct sh_eth_private *mdp); 207static void sh_eth_reset(struct net_device *ndev) 208{ 209 struct sh_eth_private *mdp = netdev_priv(ndev); 210 int cnt = 100; 211 212 if (sh_eth_is_gether(mdp)) { 213 sh_eth_write(ndev, 0x03, EDSR); 214 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, 215 EDMR); 216 while (cnt > 0) { 217 if (!(sh_eth_read(ndev, EDMR) & 0x3)) 218 break; 219 mdelay(1); 220 cnt--; 221 } 222 if (cnt < 0) 223 printk(KERN_ERR "Device reset fail\n"); 224 225 /* Table Init */ 226 sh_eth_write(ndev, 0x0, TDLAR); 227 sh_eth_write(ndev, 0x0, TDFAR); 228 sh_eth_write(ndev, 0x0, TDFXR); 229 sh_eth_write(ndev, 0x0, TDFFR); 230 sh_eth_write(ndev, 0x0, RDLAR); 231 sh_eth_write(ndev, 0x0, RDFAR); 232 sh_eth_write(ndev, 0x0, RDFXR); 233 sh_eth_write(ndev, 0x0, RDFFR); 234 } else { 235 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, 236 EDMR); 237 mdelay(3); 238 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, 239 EDMR); 240 } 241} 242 243static void sh_eth_set_duplex_giga(struct net_device *ndev) 244{ 245 struct sh_eth_private *mdp = netdev_priv(ndev); 246 247 if (mdp->duplex) /* Full */ 248 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); 249 else /* Half */ 250 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); 251} 252 253static void sh_eth_set_rate_giga(struct net_device *ndev) 254{ 255 struct sh_eth_private *mdp = netdev_priv(ndev); 256 257 switch (mdp->speed) { 258 case 10: /* 10BASE */ 259 sh_eth_write(ndev, 0x00000000, GECMR); 260 break; 261 case 100:/* 100BASE */ 262 sh_eth_write(ndev, 0x00000010, GECMR); 263 break; 264 case 1000: /* 1000BASE */ 265 sh_eth_write(ndev, 0x00000020, GECMR); 266 break; 267 default: 268 break; 269 } 270} 271 272/* SH7757(GETHERC) */ 273static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = { 274 .chip_reset = sh_eth_chip_reset_giga, 275 .set_duplex = sh_eth_set_duplex_giga, 276 .set_rate = sh_eth_set_rate_giga, 277 278 .ecsr_value = 
ECSR_ICD | ECSR_MPD, 279 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, 280 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 281 282 .tx_check = EESR_TC1 | EESR_FTC, 283 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ 284 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ 285 EESR_ECI, 286 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ 287 EESR_TFE, 288 .fdr_value = 0x0000072f, 289 .rmcr_value = 0x00000001, 290 291 .apr = 1, 292 .mpr = 1, 293 .tpauser = 1, 294 .bculr = 1, 295 .hw_swap = 1, 296 .rpadir = 1, 297 .rpadir_value = 2 << 16, 298 .no_trimd = 1, 299 .no_ade = 1, 300 .tsu = 1, 301}; 302 303static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp) 304{ 305 if (sh_eth_is_gether(mdp)) 306 return &sh_eth_my_cpu_data_giga; 307 else 308 return &sh_eth_my_cpu_data; 309} 310 311#elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763) 312#define SH_ETH_HAS_TSU 1 313static void sh_eth_reset_hw_crc(struct net_device *ndev); 314 315static void sh_eth_chip_reset(struct net_device *ndev) 316{ 317 struct sh_eth_private *mdp = netdev_priv(ndev); 318 319 /* reset device */ 320 sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); 321 mdelay(1); 322} 323 324static void sh_eth_set_duplex(struct net_device *ndev) 325{ 326 struct sh_eth_private *mdp = netdev_priv(ndev); 327 328 if (mdp->duplex) /* Full */ 329 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); 330 else /* Half */ 331 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); 332} 333 334static void sh_eth_set_rate(struct net_device *ndev) 335{ 336 struct sh_eth_private *mdp = netdev_priv(ndev); 337 338 switch (mdp->speed) { 339 case 10: /* 10BASE */ 340 sh_eth_write(ndev, GECMR_10, GECMR); 341 break; 342 case 100:/* 100BASE */ 343 sh_eth_write(ndev, GECMR_100, GECMR); 344 break; 345 case 1000: /* 1000BASE */ 346 sh_eth_write(ndev, GECMR_1000, GECMR); 347 break; 348 default: 349 break; 350 } 351} 352 353/* 
sh7763 */ 354static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 355 .chip_reset = sh_eth_chip_reset, 356 .set_duplex = sh_eth_set_duplex, 357 .set_rate = sh_eth_set_rate, 358 359 .ecsr_value = ECSR_ICD | ECSR_MPD, 360 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, 361 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 362 363 .tx_check = EESR_TC1 | EESR_FTC, 364 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ 365 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ 366 EESR_ECI, 367 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ 368 EESR_TFE, 369 370 .apr = 1, 371 .mpr = 1, 372 .tpauser = 1, 373 .bculr = 1, 374 .hw_swap = 1, 375 .no_trimd = 1, 376 .no_ade = 1, 377 .tsu = 1, 378#if defined(CONFIG_CPU_SUBTYPE_SH7734) 379 .hw_crc = 1, 380 .select_mii = 1, 381#endif 382}; 383 384static void sh_eth_reset(struct net_device *ndev) 385{ 386 int cnt = 100; 387 388 sh_eth_write(ndev, EDSR_ENALL, EDSR); 389 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR); 390 while (cnt > 0) { 391 if (!(sh_eth_read(ndev, EDMR) & 0x3)) 392 break; 393 mdelay(1); 394 cnt--; 395 } 396 if (cnt == 0) 397 printk(KERN_ERR "Device reset fail\n"); 398 399 /* Table Init */ 400 sh_eth_write(ndev, 0x0, TDLAR); 401 sh_eth_write(ndev, 0x0, TDFAR); 402 sh_eth_write(ndev, 0x0, TDFXR); 403 sh_eth_write(ndev, 0x0, TDFFR); 404 sh_eth_write(ndev, 0x0, RDLAR); 405 sh_eth_write(ndev, 0x0, RDFAR); 406 sh_eth_write(ndev, 0x0, RDFXR); 407 sh_eth_write(ndev, 0x0, RDFFR); 408 409 /* Reset HW CRC register */ 410 sh_eth_reset_hw_crc(ndev); 411 412 /* Select MII mode */ 413 if (sh_eth_my_cpu_data.select_mii) 414 sh_eth_select_mii(ndev); 415} 416 417static void sh_eth_reset_hw_crc(struct net_device *ndev) 418{ 419 if (sh_eth_my_cpu_data.hw_crc) 420 sh_eth_write(ndev, 0x0, CSMR); 421} 422 423#elif defined(CONFIG_ARCH_R8A7740) 424#define SH_ETH_HAS_TSU 1 425static void sh_eth_chip_reset(struct net_device *ndev) 426{ 427 struct sh_eth_private *mdp = 
netdev_priv(ndev); 428 429 /* reset device */ 430 sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); 431 mdelay(1); 432 433 sh_eth_select_mii(ndev); 434} 435 436static void sh_eth_reset(struct net_device *ndev) 437{ 438 int cnt = 100; 439 440 sh_eth_write(ndev, EDSR_ENALL, EDSR); 441 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR); 442 while (cnt > 0) { 443 if (!(sh_eth_read(ndev, EDMR) & 0x3)) 444 break; 445 mdelay(1); 446 cnt--; 447 } 448 if (cnt == 0) 449 printk(KERN_ERR "Device reset fail\n"); 450 451 /* Table Init */ 452 sh_eth_write(ndev, 0x0, TDLAR); 453 sh_eth_write(ndev, 0x0, TDFAR); 454 sh_eth_write(ndev, 0x0, TDFXR); 455 sh_eth_write(ndev, 0x0, TDFFR); 456 sh_eth_write(ndev, 0x0, RDLAR); 457 sh_eth_write(ndev, 0x0, RDFAR); 458 sh_eth_write(ndev, 0x0, RDFXR); 459 sh_eth_write(ndev, 0x0, RDFFR); 460} 461 462static void sh_eth_set_duplex(struct net_device *ndev) 463{ 464 struct sh_eth_private *mdp = netdev_priv(ndev); 465 466 if (mdp->duplex) /* Full */ 467 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); 468 else /* Half */ 469 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); 470} 471 472static void sh_eth_set_rate(struct net_device *ndev) 473{ 474 struct sh_eth_private *mdp = netdev_priv(ndev); 475 476 switch (mdp->speed) { 477 case 10: /* 10BASE */ 478 sh_eth_write(ndev, GECMR_10, GECMR); 479 break; 480 case 100:/* 100BASE */ 481 sh_eth_write(ndev, GECMR_100, GECMR); 482 break; 483 case 1000: /* 1000BASE */ 484 sh_eth_write(ndev, GECMR_1000, GECMR); 485 break; 486 default: 487 break; 488 } 489} 490 491/* R8A7740 */ 492static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 493 .chip_reset = sh_eth_chip_reset, 494 .set_duplex = sh_eth_set_duplex, 495 .set_rate = sh_eth_set_rate, 496 497 .ecsr_value = ECSR_ICD | ECSR_MPD, 498 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, 499 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 500 501 .tx_check = EESR_TC1 | EESR_FTC, 502 .eesr_err_check = EESR_TWB1 | 
EESR_TWB | EESR_TABT | EESR_RABT | \ 503 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ 504 EESR_ECI, 505 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ 506 EESR_TFE, 507 508 .apr = 1, 509 .mpr = 1, 510 .tpauser = 1, 511 .bculr = 1, 512 .hw_swap = 1, 513 .no_trimd = 1, 514 .no_ade = 1, 515 .tsu = 1, 516 .select_mii = 1, 517}; 518 519#elif defined(CONFIG_CPU_SUBTYPE_SH7619) 520#define SH_ETH_RESET_DEFAULT 1 521static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 522 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 523 524 .apr = 1, 525 .mpr = 1, 526 .tpauser = 1, 527 .hw_swap = 1, 528}; 529#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) 530#define SH_ETH_RESET_DEFAULT 1 531#define SH_ETH_HAS_TSU 1 532static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 533 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 534 .tsu = 1, 535}; 536#endif 537 538static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd) 539{ 540 if (!cd->ecsr_value) 541 cd->ecsr_value = DEFAULT_ECSR_INIT; 542 543 if (!cd->ecsipr_value) 544 cd->ecsipr_value = DEFAULT_ECSIPR_INIT; 545 546 if (!cd->fcftr_value) 547 cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \ 548 DEFAULT_FIFO_F_D_RFD; 549 550 if (!cd->fdr_value) 551 cd->fdr_value = DEFAULT_FDR_INIT; 552 553 if (!cd->rmcr_value) 554 cd->rmcr_value = DEFAULT_RMCR_VALUE; 555 556 if (!cd->tx_check) 557 cd->tx_check = DEFAULT_TX_CHECK; 558 559 if (!cd->eesr_err_check) 560 cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK; 561 562 if (!cd->tx_error_check) 563 cd->tx_error_check = DEFAULT_TX_ERROR_CHECK; 564} 565 566#if defined(SH_ETH_RESET_DEFAULT) 567/* Chip Reset */ 568static void sh_eth_reset(struct net_device *ndev) 569{ 570 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR); 571 mdelay(3); 572 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR); 573} 574#endif 575 576#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) 577static void 
sh_eth_set_receive_align(struct sk_buff *skb) 578{ 579 int reserve; 580 581 reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1)); 582 if (reserve) 583 skb_reserve(skb, reserve); 584} 585#else 586static void sh_eth_set_receive_align(struct sk_buff *skb) 587{ 588 skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN); 589} 590#endif 591 592 593/* CPU <-> EDMAC endian convert */ 594static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x) 595{ 596 switch (mdp->edmac_endian) { 597 case EDMAC_LITTLE_ENDIAN: 598 return cpu_to_le32(x); 599 case EDMAC_BIG_ENDIAN: 600 return cpu_to_be32(x); 601 } 602 return x; 603} 604 605static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x) 606{ 607 switch (mdp->edmac_endian) { 608 case EDMAC_LITTLE_ENDIAN: 609 return le32_to_cpu(x); 610 case EDMAC_BIG_ENDIAN: 611 return be32_to_cpu(x); 612 } 613 return x; 614} 615 616/* 617 * Program the hardware MAC address from dev->dev_addr. 618 */ 619static void update_mac_address(struct net_device *ndev) 620{ 621 sh_eth_write(ndev, 622 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | 623 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR); 624 sh_eth_write(ndev, 625 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); 626} 627 628/* 629 * Get MAC address from SuperH MAC address register 630 * 631 * SuperH's Ethernet device doesn't have 'ROM' to MAC address. 632 * This driver get MAC address that use by bootloader(U-boot or sh-ipl+g). 633 * When you want use this device, you must set MAC address in bootloader. 
634 * 635 */ 636static void read_mac_address(struct net_device *ndev, unsigned char *mac) 637{ 638 if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) { 639 memcpy(ndev->dev_addr, mac, 6); 640 } else { 641 ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24); 642 ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF; 643 ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF; 644 ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF); 645 ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF; 646 ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF); 647 } 648} 649 650static int sh_eth_is_gether(struct sh_eth_private *mdp) 651{ 652 if (mdp->reg_offset == sh_eth_offset_gigabit) 653 return 1; 654 else 655 return 0; 656} 657 658static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp) 659{ 660 if (sh_eth_is_gether(mdp)) 661 return EDTRR_TRNS_GETHER; 662 else 663 return EDTRR_TRNS_ETHER; 664} 665 666struct bb_info { 667 void (*set_gate)(void *addr); 668 struct mdiobb_ctrl ctrl; 669 void *addr; 670 u32 mmd_msk;/* MMD */ 671 u32 mdo_msk; 672 u32 mdi_msk; 673 u32 mdc_msk; 674}; 675 676/* PHY bit set */ 677static void bb_set(void *addr, u32 msk) 678{ 679 iowrite32(ioread32(addr) | msk, addr); 680} 681 682/* PHY bit clear */ 683static void bb_clr(void *addr, u32 msk) 684{ 685 iowrite32((ioread32(addr) & ~msk), addr); 686} 687 688/* PHY bit read */ 689static int bb_read(void *addr, u32 msk) 690{ 691 return (ioread32(addr) & msk) != 0; 692} 693 694/* Data I/O pin control */ 695static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit) 696{ 697 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); 698 699 if (bitbang->set_gate) 700 bitbang->set_gate(bitbang->addr); 701 702 if (bit) 703 bb_set(bitbang->addr, bitbang->mmd_msk); 704 else 705 bb_clr(bitbang->addr, bitbang->mmd_msk); 706} 707 708/* Set bit data*/ 709static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit) 710{ 711 struct bb_info *bitbang = container_of(ctrl, struct 
bb_info, ctrl); 712 713 if (bitbang->set_gate) 714 bitbang->set_gate(bitbang->addr); 715 716 if (bit) 717 bb_set(bitbang->addr, bitbang->mdo_msk); 718 else 719 bb_clr(bitbang->addr, bitbang->mdo_msk); 720} 721 722/* Get bit data*/ 723static int sh_get_mdio(struct mdiobb_ctrl *ctrl) 724{ 725 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); 726 727 if (bitbang->set_gate) 728 bitbang->set_gate(bitbang->addr); 729 730 return bb_read(bitbang->addr, bitbang->mdi_msk); 731} 732 733/* MDC pin control */ 734static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit) 735{ 736 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); 737 738 if (bitbang->set_gate) 739 bitbang->set_gate(bitbang->addr); 740 741 if (bit) 742 bb_set(bitbang->addr, bitbang->mdc_msk); 743 else 744 bb_clr(bitbang->addr, bitbang->mdc_msk); 745} 746 747/* mdio bus control struct */ 748static struct mdiobb_ops bb_ops = { 749 .owner = THIS_MODULE, 750 .set_mdc = sh_mdc_ctrl, 751 .set_mdio_dir = sh_mmd_ctrl, 752 .set_mdio_data = sh_set_mdio, 753 .get_mdio_data = sh_get_mdio, 754}; 755 756/* free skb and descriptor buffer */ 757static void sh_eth_ring_free(struct net_device *ndev) 758{ 759 struct sh_eth_private *mdp = netdev_priv(ndev); 760 int i; 761 762 /* Free Rx skb ringbuffer */ 763 if (mdp->rx_skbuff) { 764 for (i = 0; i < RX_RING_SIZE; i++) { 765 if (mdp->rx_skbuff[i]) 766 dev_kfree_skb(mdp->rx_skbuff[i]); 767 } 768 } 769 kfree(mdp->rx_skbuff); 770 771 /* Free Tx skb ringbuffer */ 772 if (mdp->tx_skbuff) { 773 for (i = 0; i < TX_RING_SIZE; i++) { 774 if (mdp->tx_skbuff[i]) 775 dev_kfree_skb(mdp->tx_skbuff[i]); 776 } 777 } 778 kfree(mdp->tx_skbuff); 779} 780 781/* format skb and descriptor buffer */ 782static void sh_eth_ring_format(struct net_device *ndev) 783{ 784 struct sh_eth_private *mdp = netdev_priv(ndev); 785 int i; 786 struct sk_buff *skb; 787 struct sh_eth_rxdesc *rxdesc = NULL; 788 struct sh_eth_txdesc *txdesc = NULL; 789 int rx_ringsize = sizeof(*rxdesc) * 
RX_RING_SIZE; 790 int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE; 791 792 mdp->cur_rx = mdp->cur_tx = 0; 793 mdp->dirty_rx = mdp->dirty_tx = 0; 794 795 memset(mdp->rx_ring, 0, rx_ringsize); 796 797 /* build Rx ring buffer */ 798 for (i = 0; i < RX_RING_SIZE; i++) { 799 /* skb */ 800 mdp->rx_skbuff[i] = NULL; 801 skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz); 802 mdp->rx_skbuff[i] = skb; 803 if (skb == NULL) 804 break; 805 dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz, 806 DMA_FROM_DEVICE); 807 sh_eth_set_receive_align(skb); 808 809 /* RX descriptor */ 810 rxdesc = &mdp->rx_ring[i]; 811 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4)); 812 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); 813 814 /* The size of the buffer is 16 byte boundary. */ 815 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); 816 /* Rx descriptor address set */ 817 if (i == 0) { 818 sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR); 819 if (sh_eth_is_gether(mdp)) 820 sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR); 821 } 822 } 823 824 mdp->dirty_rx = (u32) (i - RX_RING_SIZE); 825 826 /* Mark the last entry as wrapping the ring. 
*/ 827 rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL); 828 829 memset(mdp->tx_ring, 0, tx_ringsize); 830 831 /* build Tx ring buffer */ 832 for (i = 0; i < TX_RING_SIZE; i++) { 833 mdp->tx_skbuff[i] = NULL; 834 txdesc = &mdp->tx_ring[i]; 835 txdesc->status = cpu_to_edmac(mdp, TD_TFP); 836 txdesc->buffer_length = 0; 837 if (i == 0) { 838 /* Tx descriptor address set */ 839 sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR); 840 if (sh_eth_is_gether(mdp)) 841 sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR); 842 } 843 } 844 845 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); 846} 847 848/* Get skb and descriptor buffer */ 849static int sh_eth_ring_init(struct net_device *ndev) 850{ 851 struct sh_eth_private *mdp = netdev_priv(ndev); 852 int rx_ringsize, tx_ringsize, ret = 0; 853 854 /* 855 * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the 856 * card needs room to do 8 byte alignment, +2 so we can reserve 857 * the first 2 bytes, and +16 gets room for the status word from the 858 * card. 859 */ 860 mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : 861 (((ndev->mtu + 26 + 7) & ~7) + 2 + 16)); 862 if (mdp->cd->rpadir) 863 mdp->rx_buf_sz += NET_IP_ALIGN; 864 865 /* Allocate RX and TX skb rings */ 866 mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE, 867 GFP_KERNEL); 868 if (!mdp->rx_skbuff) { 869 dev_err(&ndev->dev, "Cannot allocate Rx skb\n"); 870 ret = -ENOMEM; 871 return ret; 872 } 873 874 mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE, 875 GFP_KERNEL); 876 if (!mdp->tx_skbuff) { 877 dev_err(&ndev->dev, "Cannot allocate Tx skb\n"); 878 ret = -ENOMEM; 879 goto skb_ring_free; 880 } 881 882 /* Allocate all Rx descriptors. 
*/ 883 rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE; 884 mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma, 885 GFP_KERNEL); 886 887 if (!mdp->rx_ring) { 888 dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n", 889 rx_ringsize); 890 ret = -ENOMEM; 891 goto desc_ring_free; 892 } 893 894 mdp->dirty_rx = 0; 895 896 /* Allocate all Tx descriptors. */ 897 tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE; 898 mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma, 899 GFP_KERNEL); 900 if (!mdp->tx_ring) { 901 dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n", 902 tx_ringsize); 903 ret = -ENOMEM; 904 goto desc_ring_free; 905 } 906 return ret; 907 908desc_ring_free: 909 /* free DMA buffer */ 910 dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma); 911 912skb_ring_free: 913 /* Free Rx and Tx skb ring buffer */ 914 sh_eth_ring_free(ndev); 915 916 return ret; 917} 918 919static int sh_eth_dev_init(struct net_device *ndev) 920{ 921 int ret = 0; 922 struct sh_eth_private *mdp = netdev_priv(ndev); 923 u_int32_t rx_int_var, tx_int_var; 924 u32 val; 925 926 /* Soft Reset */ 927 sh_eth_reset(ndev); 928 929 /* Descriptor format */ 930 sh_eth_ring_format(ndev); 931 if (mdp->cd->rpadir) 932 sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR); 933 934 /* all sh_eth int mask */ 935 sh_eth_write(ndev, 0, EESIPR); 936 937#if defined(__LITTLE_ENDIAN) 938 if (mdp->cd->hw_swap) 939 sh_eth_write(ndev, EDMR_EL, EDMR); 940 else 941#endif 942 sh_eth_write(ndev, 0, EDMR); 943 944 /* FIFO size set */ 945 sh_eth_write(ndev, mdp->cd->fdr_value, FDR); 946 sh_eth_write(ndev, 0, TFTR); 947 948 /* Frame recv control */ 949 sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR); 950 951 rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5; 952 tx_int_var = mdp->tx_int_var = DESC_I_TINT2; 953 sh_eth_write(ndev, rx_int_var | tx_int_var, TRSCER); 954 955 if (mdp->cd->bculr) 956 sh_eth_write(ndev, 0x800, BCULR); 
/* Burst sycle set */ 957 958 sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR); 959 960 if (!mdp->cd->no_trimd) 961 sh_eth_write(ndev, 0, TRIMD); 962 963 /* Recv frame limit set register */ 964 sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, 965 RFLR); 966 967 sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR); 968 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); 969 970 /* PAUSE Prohibition */ 971 val = (sh_eth_read(ndev, ECMR) & ECMR_DM) | 972 ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE; 973 974 sh_eth_write(ndev, val, ECMR); 975 976 if (mdp->cd->set_rate) 977 mdp->cd->set_rate(ndev); 978 979 /* E-MAC Status Register clear */ 980 sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR); 981 982 /* E-MAC Interrupt Enable register */ 983 sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR); 984 985 /* Set MAC address */ 986 update_mac_address(ndev); 987 988 /* mask reset */ 989 if (mdp->cd->apr) 990 sh_eth_write(ndev, APR_AP, APR); 991 if (mdp->cd->mpr) 992 sh_eth_write(ndev, MPR_MP, MPR); 993 if (mdp->cd->tpauser) 994 sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER); 995 996 /* Setting the Rx mode will start the Rx process. */ 997 sh_eth_write(ndev, EDRRR_R, EDRRR); 998 999 netif_start_queue(ndev); 1000 1001 return ret; 1002} 1003 1004/* free Tx skb function */ 1005static int sh_eth_txfree(struct net_device *ndev) 1006{ 1007 struct sh_eth_private *mdp = netdev_priv(ndev); 1008 struct sh_eth_txdesc *txdesc; 1009 int freeNum = 0; 1010 int entry = 0; 1011 1012 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { 1013 entry = mdp->dirty_tx % TX_RING_SIZE; 1014 txdesc = &mdp->tx_ring[entry]; 1015 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT)) 1016 break; 1017 /* Free the original skb. 
*/ 1018 if (mdp->tx_skbuff[entry]) { 1019 dma_unmap_single(&ndev->dev, txdesc->addr, 1020 txdesc->buffer_length, DMA_TO_DEVICE); 1021 dev_kfree_skb_irq(mdp->tx_skbuff[entry]); 1022 mdp->tx_skbuff[entry] = NULL; 1023 freeNum++; 1024 } 1025 txdesc->status = cpu_to_edmac(mdp, TD_TFP); 1026 if (entry >= TX_RING_SIZE - 1) 1027 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); 1028 1029 ndev->stats.tx_packets++; 1030 ndev->stats.tx_bytes += txdesc->buffer_length; 1031 } 1032 return freeNum; 1033} 1034 1035/* Packet receive function */ 1036static int sh_eth_rx(struct net_device *ndev, u32 intr_status) 1037{ 1038 struct sh_eth_private *mdp = netdev_priv(ndev); 1039 struct sh_eth_rxdesc *rxdesc; 1040 1041 int entry = mdp->cur_rx % RX_RING_SIZE; 1042 int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx; 1043 struct sk_buff *skb; 1044 u16 pkt_len = 0; 1045 u32 desc_status; 1046 1047 rxdesc = &mdp->rx_ring[entry]; 1048 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { 1049 desc_status = edmac_to_cpu(mdp, rxdesc->status); 1050 pkt_len = rxdesc->frame_length; 1051 1052#if defined(CONFIG_ARCH_R8A7740) 1053 desc_status >>= 16; 1054#endif 1055 1056 if (--boguscnt < 0) 1057 break; 1058 1059 if (!(desc_status & RDFEND)) 1060 ndev->stats.rx_length_errors++; 1061 1062 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | 1063 RD_RFS5 | RD_RFS6 | RD_RFS10)) { 1064 ndev->stats.rx_errors++; 1065 if (desc_status & RD_RFS1) 1066 ndev->stats.rx_crc_errors++; 1067 if (desc_status & RD_RFS2) 1068 ndev->stats.rx_frame_errors++; 1069 if (desc_status & RD_RFS3) 1070 ndev->stats.rx_length_errors++; 1071 if (desc_status & RD_RFS4) 1072 ndev->stats.rx_length_errors++; 1073 if (desc_status & RD_RFS6) 1074 ndev->stats.rx_missed_errors++; 1075 if (desc_status & RD_RFS10) 1076 ndev->stats.rx_over_errors++; 1077 } else { 1078 if (!mdp->cd->hw_swap) 1079 sh_eth_soft_swap( 1080 phys_to_virt(ALIGN(rxdesc->addr, 4)), 1081 pkt_len + 2); 1082 skb = mdp->rx_skbuff[entry]; 1083 
mdp->rx_skbuff[entry] = NULL; 1084 if (mdp->cd->rpadir) 1085 skb_reserve(skb, NET_IP_ALIGN); 1086 skb_put(skb, pkt_len); 1087 skb->protocol = eth_type_trans(skb, ndev); 1088 netif_rx(skb); 1089 ndev->stats.rx_packets++; 1090 ndev->stats.rx_bytes += pkt_len; 1091 } 1092 rxdesc->status |= cpu_to_edmac(mdp, RD_RACT); 1093 entry = (++mdp->cur_rx) % RX_RING_SIZE; 1094 rxdesc = &mdp->rx_ring[entry]; 1095 } 1096 1097 /* Refill the Rx ring buffers. */ 1098 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) { 1099 entry = mdp->dirty_rx % RX_RING_SIZE; 1100 rxdesc = &mdp->rx_ring[entry]; 1101 /* The size of the buffer is 16 byte boundary. */ 1102 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); 1103 1104 if (mdp->rx_skbuff[entry] == NULL) { 1105 skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz); 1106 mdp->rx_skbuff[entry] = skb; 1107 if (skb == NULL) 1108 break; /* Better luck next round. */ 1109 dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz, 1110 DMA_FROM_DEVICE); 1111 sh_eth_set_receive_align(skb); 1112 1113 skb_checksum_none_assert(skb); 1114 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4)); 1115 } 1116 if (entry >= RX_RING_SIZE - 1) 1117 rxdesc->status |= 1118 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL); 1119 else 1120 rxdesc->status |= 1121 cpu_to_edmac(mdp, RD_RACT | RD_RFP); 1122 } 1123 1124 /* Restart Rx engine if stopped. */ 1125 /* If we don't need to check status, don't. 
-KDU */ 1126 if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) { 1127 /* fix the values for the next receiving if RDE is set */ 1128 if (intr_status & EESR_RDE) 1129 mdp->cur_rx = mdp->dirty_rx = 1130 (sh_eth_read(ndev, RDFAR) - 1131 sh_eth_read(ndev, RDLAR)) >> 4; 1132 sh_eth_write(ndev, EDRRR_R, EDRRR); 1133 } 1134 1135 return 0; 1136} 1137 1138static void sh_eth_rcv_snd_disable(struct net_device *ndev) 1139{ 1140 /* disable tx and rx */ 1141 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & 1142 ~(ECMR_RE | ECMR_TE), ECMR); 1143} 1144 1145static void sh_eth_rcv_snd_enable(struct net_device *ndev) 1146{ 1147 /* enable tx and rx */ 1148 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | 1149 (ECMR_RE | ECMR_TE), ECMR); 1150} 1151 1152/* error control function */ 1153static void sh_eth_error(struct net_device *ndev, int intr_status) 1154{ 1155 struct sh_eth_private *mdp = netdev_priv(ndev); 1156 u32 felic_stat; 1157 u32 link_stat; 1158 u32 mask; 1159 1160 if (intr_status & EESR_ECI) { 1161 felic_stat = sh_eth_read(ndev, ECSR); 1162 sh_eth_write(ndev, felic_stat, ECSR); /* clear int */ 1163 if (felic_stat & ECSR_ICD) 1164 ndev->stats.tx_carrier_errors++; 1165 if (felic_stat & ECSR_LCHNG) { 1166 /* Link Changed */ 1167 if (mdp->cd->no_psr || mdp->no_ether_link) { 1168 if (mdp->link == PHY_DOWN) 1169 link_stat = 0; 1170 else 1171 link_stat = PHY_ST_LINK; 1172 } else { 1173 link_stat = (sh_eth_read(ndev, PSR)); 1174 if (mdp->ether_link_active_low) 1175 link_stat = ~link_stat; 1176 } 1177 if (!(link_stat & PHY_ST_LINK)) 1178 sh_eth_rcv_snd_disable(ndev); 1179 else { 1180 /* Link Up */ 1181 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) & 1182 ~DMAC_M_ECI, EESIPR); 1183 /*clear int */ 1184 sh_eth_write(ndev, sh_eth_read(ndev, ECSR), 1185 ECSR); 1186 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) | 1187 DMAC_M_ECI, EESIPR); 1188 /* enable tx and rx */ 1189 sh_eth_rcv_snd_enable(ndev); 1190 } 1191 } 1192 } 1193 1194 if (intr_status & EESR_TWB) { 1195 /* Write buck end. 
		   unused write back interrupt */
		if (intr_status & EESR_TABT)	/* Transmit Abort int */
			ndev->stats.tx_aborted_errors++;
		/* NOTE(review): this message prints whenever TWB is set,
		 * even without TABT -- confirm that is intended. */
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit Abort\n");
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			ndev->stats.rx_frame_errors++;
			if (netif_msg_rx_err(mdp))
				dev_err(&ndev->dev, "Receive Abort\n");
		}
	}

	if (intr_status & EESR_TDE) {
		/* Transmit Descriptor Empty int */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
	}

	if (intr_status & EESR_TFE) {
		/* FIFO under flow */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
	}

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		ndev->stats.rx_over_errors++;

		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive Descriptor Empty\n");
	}

	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		ndev->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive FIFO Overflow\n");
	}

	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
		/* Address Error */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Address Error\n");
	}

	/* Any of these Tx-side errors requires reclaiming the ring and
	 * possibly re-kicking the Tx DMA engine. */
	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
	if (mdp->cd->no_ade)
		mask &= ~EESR_ADE;
	if (intr_status & mask) {
		/* Tx error */
		u32 edtrr = sh_eth_read(ndev, EDTRR);
		/* dmesg */
		dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
				intr_status, mdp->cur_tx);
		dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
				mdp->dirty_tx, (u32) ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_txfree(ndev);

		/* SH7712 BUG */
		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
			/* tx dma start */
			sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
		}
		/* wakeup */
		netif_wake_queue(ndev);
	}
}

/* Interrupt handler: ack EESR causes we own, dispatch Rx, reclaim Tx,
 * and route error causes to sh_eth_error(). Returns IRQ_NONE when no
 * recognised cause was pending (the line may be shared). */
static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
	struct net_device *ndev = netdev;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_cpu_data *cd = mdp->cd;
	irqreturn_t ret = IRQ_NONE;
	u32 intr_status = 0;

	spin_lock(&mdp->lock);

	/* Get interrupt status */
	intr_status = sh_eth_read(ndev, EESR);
	/* Clear interrupt (write-1-clear) */
	if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
			EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
			cd->tx_check | cd->eesr_err_check)) {
		sh_eth_write(ndev, intr_status, EESR);
		ret = IRQ_HANDLED;
	} else
		goto other_irq;

	if (intr_status & (EESR_FRC | /* Frame recv*/
			EESR_RMAF | /* Multi cast address recv*/
			EESR_RRF  | /* Bit frame recv */
			EESR_RTLF | /* Long frame recv*/
			EESR_RTSF | /* short frame recv */
			EESR_PRE  | /* PHY-LSI recv error */
			EESR_CERF)){ /* recv frame CRC error */
		sh_eth_rx(ndev, intr_status);
	}

	/* Tx Check */
	if (intr_status & cd->tx_check) {
		sh_eth_txfree(ndev);
		netif_wake_queue(ndev);
	}

	if (intr_status & cd->eesr_err_check)
		sh_eth_error(ndev, intr_status);

other_irq:
	spin_unlock(&mdp->lock);

	return ret;
}

/* Periodic timer callback: merely re-arms itself every 10 s (the body
 * performs no other work). */
static void sh_eth_timer(unsigned long data)
{
	struct net_device *ndev = (struct net_device *)data;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	mod_timer(&mdp->timer, jiffies + (10 * HZ));
}

/*
 PHY state control function */
/* phylib callback: mirror PHY duplex/speed/link changes into mdp and
 * the MAC configuration (via cpu_data hooks). */
static void sh_eth_adjust_link(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;
	int new_state = 0;

	if (phydev->link != PHY_DOWN) {
		if (phydev->duplex != mdp->duplex) {
			new_state = 1;
			mdp->duplex = phydev->duplex;
			if (mdp->cd->set_duplex)
				mdp->cd->set_duplex(ndev);
		}

		if (phydev->speed != mdp->speed) {
			new_state = 1;
			mdp->speed = phydev->speed;
			if (mdp->cd->set_rate)
				mdp->cd->set_rate(ndev);
		}
		if (mdp->link == PHY_DOWN) {
			/* Link came up: clear the Tx-flow-control bit. */
			sh_eth_write(ndev,
				(sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
			new_state = 1;
			mdp->link = phydev->link;
		}
	} else if (mdp->link) {
		/* Link lost: forget the cached speed/duplex. */
		new_state = 1;
		mdp->link = PHY_DOWN;
		mdp->speed = 0;
		mdp->duplex = -1;
	}

	if (new_state && netif_msg_link(mdp))
		phy_print_status(phydev);
}

/* PHY init function */
/* Connect to the PHY named by the platform data; on success stores the
 * phy_device in mdp->phydev. Returns 0 or a negative errno. */
static int sh_eth_phy_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	char phy_id[MII_BUS_ID_SIZE + 3];
	struct phy_device *phydev = NULL;

	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
		mdp->mii_bus->id , mdp->phy_id);

	mdp->link = PHY_DOWN;
	mdp->speed = 0;
	mdp->duplex = -1;

	/* Try connect to PHY */
	phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
			     0, mdp->phy_interface);
	if (IS_ERR(phydev)) {
		dev_err(&ndev->dev, "phy_connect failed\n");
		return PTR_ERR(phydev);
	}

	dev_info(&ndev->dev, "attached phy %i to driver %s\n",
		 phydev->addr, phydev->drv->name);

	mdp->phydev = phydev;

	return 0;
}

/* PHY control start function */
/* Connect, reset, and start the PHY state machine. */
static int sh_eth_phy_start(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	ret = sh_eth_phy_init(ndev);
	if (ret)
		return ret;

	/* reset phy - this also wakes it from PDOWN */
	phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
	phy_start(mdp->phydev);

	return 0;
}

/* ethtool: report link settings straight from phylib. */
static int sh_eth_get_settings(struct net_device *ndev,
			struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_ethtool_gset(mdp->phydev, ecmd);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

/* ethtool: apply link settings; Rx/Tx are quiesced around the change.
 * NOTE(review): mdelay(1) runs with the spinlock held and IRQs off --
 * confirm this busy-wait is acceptable here. */
static int sh_eth_set_settings(struct net_device *ndev,
		struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);

	/* disable tx and rx */
	sh_eth_rcv_snd_disable(ndev);

	ret = phy_ethtool_sset(mdp->phydev, ecmd);
	if (ret)
		goto error_exit;

	if (ecmd->duplex == DUPLEX_FULL)
		mdp->duplex = 1;
	else
		mdp->duplex = 0;

	if (mdp->cd->set_duplex)
		mdp->cd->set_duplex(ndev);

error_exit:
	mdelay(1);

	/* enable tx and rx */
	sh_eth_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

/* ethtool: restart autonegotiation. */
static int sh_eth_nway_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);
	ret = phy_start_aneg(mdp->phydev);
	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}

/* ethtool: get the NETIF_MSG_* bitmask. */
static u32 sh_eth_get_msglevel(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	return mdp->msg_enable;
}

/* ethtool: set the NETIF_MSG_* bitmask. */
static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	mdp->msg_enable = value;
}

/* Names for the device-specific ethtool statistics, in the order
 * sh_eth_get_ethtool_stats() fills them. */
static const char
sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = { 1486 "rx_current", "tx_current", 1487 "rx_dirty", "tx_dirty", 1488}; 1489#define SH_ETH_STATS_LEN ARRAY_SIZE(sh_eth_gstrings_stats) 1490 1491static int sh_eth_get_sset_count(struct net_device *netdev, int sset) 1492{ 1493 switch (sset) { 1494 case ETH_SS_STATS: 1495 return SH_ETH_STATS_LEN; 1496 default: 1497 return -EOPNOTSUPP; 1498 } 1499} 1500 1501static void sh_eth_get_ethtool_stats(struct net_device *ndev, 1502 struct ethtool_stats *stats, u64 *data) 1503{ 1504 struct sh_eth_private *mdp = netdev_priv(ndev); 1505 int i = 0; 1506 1507 /* device-specific stats */ 1508 data[i++] = mdp->cur_rx; 1509 data[i++] = mdp->cur_tx; 1510 data[i++] = mdp->dirty_rx; 1511 data[i++] = mdp->dirty_tx; 1512} 1513 1514static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data) 1515{ 1516 switch (stringset) { 1517 case ETH_SS_STATS: 1518 memcpy(data, *sh_eth_gstrings_stats, 1519 sizeof(sh_eth_gstrings_stats)); 1520 break; 1521 } 1522} 1523 1524static const struct ethtool_ops sh_eth_ethtool_ops = { 1525 .get_settings = sh_eth_get_settings, 1526 .set_settings = sh_eth_set_settings, 1527 .nway_reset = sh_eth_nway_reset, 1528 .get_msglevel = sh_eth_get_msglevel, 1529 .set_msglevel = sh_eth_set_msglevel, 1530 .get_link = ethtool_op_get_link, 1531 .get_strings = sh_eth_get_strings, 1532 .get_ethtool_stats = sh_eth_get_ethtool_stats, 1533 .get_sset_count = sh_eth_get_sset_count, 1534}; 1535 1536/* network device open function */ 1537static int sh_eth_open(struct net_device *ndev) 1538{ 1539 int ret = 0; 1540 struct sh_eth_private *mdp = netdev_priv(ndev); 1541 1542 pm_runtime_get_sync(&mdp->pdev->dev); 1543 1544 ret = request_irq(ndev->irq, sh_eth_interrupt, 1545#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \ 1546 defined(CONFIG_CPU_SUBTYPE_SH7764) || \ 1547 defined(CONFIG_CPU_SUBTYPE_SH7757) 1548 IRQF_SHARED, 1549#else 1550 0, 1551#endif 1552 ndev->name, ndev); 1553 if (ret) { 1554 dev_err(&ndev->dev, "Can not assign IRQ 
number\n"); 1555 return ret; 1556 } 1557 1558 /* Descriptor set */ 1559 ret = sh_eth_ring_init(ndev); 1560 if (ret) 1561 goto out_free_irq; 1562 1563 /* device init */ 1564 ret = sh_eth_dev_init(ndev); 1565 if (ret) 1566 goto out_free_irq; 1567 1568 /* PHY control start*/ 1569 ret = sh_eth_phy_start(ndev); 1570 if (ret) 1571 goto out_free_irq; 1572 1573 /* Set the timer to check for link beat. */ 1574 init_timer(&mdp->timer); 1575 mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */ 1576 setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev); 1577 1578 return ret; 1579 1580out_free_irq: 1581 free_irq(ndev->irq, ndev); 1582 pm_runtime_put_sync(&mdp->pdev->dev); 1583 return ret; 1584} 1585 1586/* Timeout function */ 1587static void sh_eth_tx_timeout(struct net_device *ndev) 1588{ 1589 struct sh_eth_private *mdp = netdev_priv(ndev); 1590 struct sh_eth_rxdesc *rxdesc; 1591 int i; 1592 1593 netif_stop_queue(ndev); 1594 1595 if (netif_msg_timer(mdp)) 1596 dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x," 1597 " resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR)); 1598 1599 /* tx_errors count up */ 1600 ndev->stats.tx_errors++; 1601 1602 /* timer off */ 1603 del_timer_sync(&mdp->timer); 1604 1605 /* Free all the skbuffs in the Rx queue. */ 1606 for (i = 0; i < RX_RING_SIZE; i++) { 1607 rxdesc = &mdp->rx_ring[i]; 1608 rxdesc->status = 0; 1609 rxdesc->addr = 0xBADF00D0; 1610 if (mdp->rx_skbuff[i]) 1611 dev_kfree_skb(mdp->rx_skbuff[i]); 1612 mdp->rx_skbuff[i] = NULL; 1613 } 1614 for (i = 0; i < TX_RING_SIZE; i++) { 1615 if (mdp->tx_skbuff[i]) 1616 dev_kfree_skb(mdp->tx_skbuff[i]); 1617 mdp->tx_skbuff[i] = NULL; 1618 } 1619 1620 /* device init */ 1621 sh_eth_dev_init(ndev); 1622 1623 /* timer on */ 1624 mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. 
	   */
	add_timer(&mdp->timer);
}

/* Packet transmit function */
/* Queue one skb on the Tx descriptor ring and kick the Tx DMA engine
 * if it is idle. Returns NETDEV_TX_OK or NETDEV_TX_BUSY. */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	u32 entry;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	/* Keep 4 slots of headroom; try to reclaim completed buffers
	 * before giving up and stopping the queue. */
	if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
		if (!sh_eth_txfree(ndev)) {
			if (netif_msg_tx_queued(mdp))
				dev_warn(&ndev->dev, "TxFD exhausted.\n");
			netif_stop_queue(ndev);
			spin_unlock_irqrestore(&mdp->lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	spin_unlock_irqrestore(&mdp->lock, flags);

	entry = mdp->cur_tx % TX_RING_SIZE;
	mdp->tx_skbuff[entry] = skb;
	txdesc = &mdp->tx_ring[entry];
	/* soft swap. */
	/* NOTE(review): this uses txdesc->addr BEFORE it is overwritten
	 * below, i.e. the address of the PREVIOUS buffer in this slot --
	 * confirm this ordering is intentional on !hw_swap chips. */
	if (!mdp->cd->hw_swap)
		sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
				 skb->len + 2);
	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
				      DMA_TO_DEVICE);
	/* Pad short frames up to the hardware minimum. */
	if (skb->len < ETHERSMALL)
		txdesc->buffer_length = ETHERSMALL;
	else
		txdesc->buffer_length = skb->len;

	/* Hand the descriptor to the controller; the last ring entry
	 * also carries the ring-end mark (TD_TDLE). */
	if (entry >= TX_RING_SIZE - 1)
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
	else
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

	mdp->cur_tx++;

	if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
		sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);

	return NETDEV_TX_OK;
}

/* device close function */
/* Bring the interface down: mask IRQs, stop DMA, detach the PHY, and
 * release the rings and descriptor memory. */
static int sh_eth_close(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ringsize;

	netif_stop_queue(ndev);

	/* Disable interrupts by clearing the interrupt mask. */
	sh_eth_write(ndev, 0x0000, EESIPR);

	/* Stop the chip's Tx and Rx processes. */
	sh_eth_write(ndev, 0, EDTRR);
	sh_eth_write(ndev, 0, EDRRR);

	/* PHY Disconnect */
	if (mdp->phydev) {
		phy_stop(mdp->phydev);
		phy_disconnect(mdp->phydev);
	}

	free_irq(ndev->irq, ndev);

	del_timer_sync(&mdp->timer);

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);

	/* free DMA buffer */
	/* NOTE(review): dma_free_coherent() is called with a NULL device;
	 * confirm the rings were allocated against the same (NULL) dev. */
	ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
	dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);

	/* free DMA buffer */
	ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
	dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);

	pm_runtime_put_sync(&mdp->pdev->dev);

	return 0;
}

/* Fold the hardware's write-clear error counters into ndev->stats. */
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	pm_runtime_get_sync(&mdp->pdev->dev);

	ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
	sh_eth_write(ndev, 0, TROCR);	/* (write clear) */
	ndev->stats.collisions += sh_eth_read(ndev, CDCR);
	sh_eth_write(ndev, 0, CDCR);	/* (write clear) */
	ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
	sh_eth_write(ndev, 0, LCCR);	/* (write clear) */
	if (sh_eth_is_gether(mdp)) {
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
		sh_eth_write(ndev, 0, CERCR);	/* (write clear) */
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
		sh_eth_write(ndev, 0, CEECR);	/* (write clear) */
	} else {
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
		sh_eth_write(ndev, 0, CNDCR);	/* (write clear) */
	}
	pm_runtime_put_sync(&mdp->pdev->dev);

	return &ndev->stats;
}

/* ioctl to device function */
/* Forward MII ioctls to phylib when the interface is running. */
static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
				int cmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device
		*phydev = mdp->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}

#if defined(SH_ETH_HAS_TSU)
/* For TSU_POSTn. Please refer to the manual about this (strange) bitfields */
/* Each TSU_POSTn register packs 8 CAM entries, 4 bits per entry. */
static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
					    int entry)
{
	return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
}

/* 4-bit field mask for 'entry' within its TSU_POSTn register. */
static u32 sh_eth_tsu_get_post_mask(int entry)
{
	return 0x0f << (28 - ((entry % 8) * 4));
}

/* This port's enable bit inside the entry's 4-bit field (the bit
 * position depends on mdp->port). */
static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
{
	return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
}

/* Set this port's POST bit so frames matching CAM 'entry' are accepted. */
static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
					     int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 tmp;
	void *reg_offset;

	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
	tmp = ioread32(reg_offset);
	iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
}

/* Clear this port's POST bits for CAM 'entry'. */
static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
					      int entry)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 post_mask, ref_mask, tmp;
	void *reg_offset;

	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
	post_mask = sh_eth_tsu_get_post_mask(entry);
	ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;

	tmp = ioread32(reg_offset);
	iowrite32(tmp & ~post_mask, reg_offset);

	/* If other port enables, the function returns "true" */
	return tmp & ref_mask;
}

/* Poll TSU_ADSBSY until the address-array write completes; 0 on
 * success, -ETIMEDOUT after SH_ETH_TSU_TIMEOUT_MS. */
static int sh_eth_tsu_busy(struct net_device *ndev)
{
	int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
		udelay(10);
		timeout--;
		if (timeout <= 0) {
			dev_err(&ndev->dev, "%s: timeout\n", __func__);
			return -ETIMEDOUT;
		}
	}

	return 0;
}

/* Write a 6-byte MAC address into a CAM entry (high word then low
 * word), waiting for the TSU after each register write. */
static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
				  const u8 *addr)
{
	u32 val;

	val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
	iowrite32(val, reg);
	if (sh_eth_tsu_busy(ndev) < 0)
		return -EBUSY;

	val = addr[4] << 8 | addr[5];
	iowrite32(val, reg + 4);
	if (sh_eth_tsu_busy(ndev) < 0)
		return -EBUSY;

	return 0;
}

/* Read a CAM entry back into a 6-byte MAC address. */
static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
{
	u32 val;

	val = ioread32(reg);
	addr[0] = (val >> 24) & 0xff;
	addr[1] = (val >> 16) & 0xff;
	addr[2] = (val >> 8) & 0xff;
	addr[3] = val & 0xff;
	val = ioread32(reg + 4);
	addr[4] = (val >> 8) & 0xff;
	addr[5] = val & 0xff;
}


/* Linear-scan the CAM for 'addr'; returns its 0-based index or
 * -ENOENT when absent. */
static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i;
	u8 c_addr[ETH_ALEN];

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(reg_offset, c_addr);
		if (memcmp(addr, c_addr, ETH_ALEN) == 0)
			return i;
	}

	return -ENOENT;
}

/* Find a free (all-zero) CAM slot; -ENOMEM when the CAM is full. */
static int sh_eth_tsu_find_empty(struct net_device *ndev)
{
	u8 blank[ETH_ALEN];
	int entry;

	memset(blank, 0, sizeof(blank));
	entry = sh_eth_tsu_find_entry(ndev, blank);
	return (entry < 0) ?
-ENOMEM : entry; 1880} 1881 1882static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev, 1883 int entry) 1884{ 1885 struct sh_eth_private *mdp = netdev_priv(ndev); 1886 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); 1887 int ret; 1888 u8 blank[ETH_ALEN]; 1889 1890 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) & 1891 ~(1 << (31 - entry)), TSU_TEN); 1892 1893 memset(blank, 0, sizeof(blank)); 1894 ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank); 1895 if (ret < 0) 1896 return ret; 1897 return 0; 1898} 1899 1900static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr) 1901{ 1902 struct sh_eth_private *mdp = netdev_priv(ndev); 1903 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); 1904 int i, ret; 1905 1906 if (!mdp->cd->tsu) 1907 return 0; 1908 1909 i = sh_eth_tsu_find_entry(ndev, addr); 1910 if (i < 0) { 1911 /* No entry found, create one */ 1912 i = sh_eth_tsu_find_empty(ndev); 1913 if (i < 0) 1914 return -ENOMEM; 1915 ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr); 1916 if (ret < 0) 1917 return ret; 1918 1919 /* Enable the entry */ 1920 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) | 1921 (1 << (31 - i)), TSU_TEN); 1922 } 1923 1924 /* Entry found or created, enable POST */ 1925 sh_eth_tsu_enable_cam_entry_post(ndev, i); 1926 1927 return 0; 1928} 1929 1930static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr) 1931{ 1932 struct sh_eth_private *mdp = netdev_priv(ndev); 1933 int i, ret; 1934 1935 if (!mdp->cd->tsu) 1936 return 0; 1937 1938 i = sh_eth_tsu_find_entry(ndev, addr); 1939 if (i) { 1940 /* Entry found */ 1941 if (sh_eth_tsu_disable_cam_entry_post(ndev, i)) 1942 goto done; 1943 1944 /* Disable the entry if both ports was disabled */ 1945 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i); 1946 if (ret < 0) 1947 return ret; 1948 } 1949done: 1950 return 0; 1951} 1952 1953static int sh_eth_tsu_purge_all(struct net_device *ndev) 1954{ 1955 struct 
	sh_eth_private *mdp = netdev_priv(ndev);
	int i, ret;

	if (unlikely(!mdp->cd->tsu))
		return 0;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
		/* Skip entries the other port still uses. */
		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
			continue;

		/* Disable the entry if both ports was disabled */
		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* Remove every multicast address from the CAM for this port. */
static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u8 addr[ETH_ALEN];
	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
	int i;

	if (unlikely(!mdp->cd->tsu))
		return;

	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
		sh_eth_tsu_read_entry(reg_offset, addr);
		if (is_multicast_ether_addr(addr))
			sh_eth_tsu_del_entry(ndev, addr);
	}
}

/* Multicast reception directions set */
/* ndo_set_rx_mode: program ECMR promiscuity/multicast bits and the TSU
 * CAM to match ndev->flags and the multicast list. */
static void sh_eth_set_multicast_list(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ecmr_bits;
	int mcast_all = 0;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	/*
	 * Initial condition is MCT = 1, PRM = 0.
	 * Depending on ndev->flags, set PRM or clear MCT
	 */
	ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;

	if (!(ndev->flags & IFF_MULTICAST)) {
		sh_eth_tsu_purge_mcast(ndev);
		mcast_all = 1;
	}
	if (ndev->flags & IFF_ALLMULTI) {
		sh_eth_tsu_purge_mcast(ndev);
		ecmr_bits &= ~ECMR_MCT;
		mcast_all = 1;
	}

	if (ndev->flags & IFF_PROMISC) {
		sh_eth_tsu_purge_all(ndev);
		ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
	} else if (mdp->cd->tsu) {
		struct netdev_hw_addr *ha;
		netdev_for_each_mc_addr(ha, ndev) {
			if (mcast_all && is_multicast_ether_addr(ha->addr))
				continue;

			/* CAM full: fall back to accepting all multicast. */
			if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
				if (!mcast_all) {
					sh_eth_tsu_purge_mcast(ndev);
					ecmr_bits &= ~ECMR_MCT;
					mcast_all = 1;
				}
			}
		}
	} else {
		/* Normal, unicast/broadcast-only mode. */
		ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
	}

	/* update the ethernet mode */
	sh_eth_write(ndev, ecmr_bits, ECMR);

	spin_unlock_irqrestore(&mdp->lock, flags);
}

/* Each port owns one VLAN-tag filter register. */
static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
{
	if (!mdp->port)
		return TSU_VTAG0;
	else
		return TSU_VTAG1;
}

/* ndo_vlan_rx_add_vid: program the single-VID hardware VLAN filter. */
static int sh_eth_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids++;

	/*
	 * The controller has only one VLAN tag HW filter. So, if more than
	 * one VID is requested, the driver disables filtering entirely and
	 * accepts all tagged frames.
	 */
	if (mdp->vlan_num_ids > 1) {
		/* disable VLAN filter */
		sh_eth_tsu_write(mdp, 0, vtag_reg_index);
		return 0;
	}

	sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
			 vtag_reg_index);

	return 0;
}

/* ndo_vlan_rx_kill_vid: drop the VID and clear the filter register. */
static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int vtag_reg_index = sh_eth_get_vtag_index(mdp);

	if (unlikely(!mdp->cd->tsu))
		return -EPERM;

	/* No filtering if vid = 0 */
	if (!vid)
		return 0;

	mdp->vlan_num_ids--;
	sh_eth_tsu_write(mdp, 0, vtag_reg_index);

	return 0;
}
#endif /* SH_ETH_HAS_TSU */

/* SuperH's TSU register init function */
/* Put every TSU feature (forwarding, QTAG, CAM, POST) into its
 * disabled reset state. */
static void sh_eth_tsu_init(struct sh_eth_private *mdp)
{
	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
	if (sh_eth_is_gether(mdp)) {
		sh_eth_tsu_write(mdp, 0, TSU_QTAG0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAG1);	/* Disable QTAG(1->0) */
	} else {
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
	}
	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* all interrupt status clear */
	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupt */
	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entry */
	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
}

/* MDIO bus release function */
/* Tear down the bitbanged MDIO bus created by sh_mdio_init(). */
static int sh_mdio_release(struct net_device *ndev)
{
	struct mii_bus *bus = dev_get_drvdata(&ndev->dev);

	/* unregister mdio bus */
	mdiobus_unregister(bus);

	/* remove mdio bus info from net_device */
	dev_set_drvdata(&ndev->dev, NULL);

	/* free interrupts memory */
	kfree(bus->irq);

	/* free bitbang info */
	free_mdio_bitbang(bus);

	return 0;
}

/* MDIO bus init function */
/* Create and register a bitbanged MDIO bus driven through the PIR
 * register. Returns 0 or a negative errno. */
static int sh_mdio_init(struct net_device *ndev, int id,
			struct sh_eth_plat_data *pd)
{
	int ret, i;
	struct bb_info *bitbang;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* create bit control struct for PHY */
	bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
	if (!bitbang) {
		ret = -ENOMEM;
		goto out;
	}

	/* bitbang init */
	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
	bitbang->set_gate = pd->set_mdio_gate;
	bitbang->mdi_msk = 0x08;
	bitbang->mdo_msk = 0x04;
	bitbang->mmd_msk = 0x02;/* MMD */
	bitbang->mdc_msk = 0x01;
	bitbang->ctrl.ops = &bb_ops;

	/* MII controller setting */
	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
	if (!mdp->mii_bus) {
		ret = -ENOMEM;
		goto out_free_bitbang;
	}

	/* Hook up MII support for ethtool */
	mdp->mii_bus->name = "sh_mii";
	mdp->mii_bus->parent = &ndev->dev;
	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		mdp->pdev->name, id);

	/* PHY IRQ */
	mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
	if (!mdp->mii_bus->irq) {
		ret = -ENOMEM;
		goto out_free_bus;
	}

	/* No PHY interrupt lines: poll every PHY address. */
	for (i = 0; i < PHY_MAX_ADDR; i++)
		mdp->mii_bus->irq[i] = PHY_POLL;

	/* register mdio bus */
	ret = mdiobus_register(mdp->mii_bus);
	if (ret)
		goto out_free_irq;

	dev_set_drvdata(&ndev->dev, mdp->mii_bus);

	return 0;

out_free_irq:
	kfree(mdp->mii_bus->irq);

out_free_bus:
	free_mdio_bitbang(mdp->mii_bus);

out_free_bitbang:
	kfree(bitbang);

out:
	return ret;
}

/* Map the platform-data register layout id to an offset table. */
static const u16 *sh_eth_get_register_offset(int register_type)
{
	const u16 *reg_offset = NULL;

	switch (register_type) {
	case SH_ETH_REG_GIGABIT:
		reg_offset = sh_eth_offset_gigabit;
		break;
	case SH_ETH_REG_FAST_SH4:
		reg_offset = sh_eth_offset_fast_sh4;
		break;
	case SH_ETH_REG_FAST_SH3_SH2:
		reg_offset = sh_eth_offset_fast_sh3_sh2;
		break;
	default:
		printk(KERN_ERR "Unknown register type (%d)\n", register_type);
		break;
	}

	return reg_offset;
}

static const struct net_device_ops sh_eth_netdev_ops = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
#if defined(SH_ETH_HAS_TSU)
	.ndo_set_rx_mode	= sh_eth_set_multicast_list,
	.ndo_vlan_rx_add_vid	= sh_eth_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= sh_eth_vlan_rx_kill_vid,
#endif
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};

/* Platform probe: map registers, fill sh_eth_private from platform
 * data, optionally set up the TSU, and register the netdev + MDIO bus. */
static int sh_eth_drv_probe(struct platform_device *pdev)
{
	int ret, devno = 0;
	struct resource *res;
	struct net_device *ndev = NULL;
	struct sh_eth_private *mdp = NULL;
	struct sh_eth_plat_data *pd;

	/* get base addr */
	res = platform_get_resource(pdev, IORESOURCE_MEM,
0); 2268 if (unlikely(res == NULL)) { 2269 dev_err(&pdev->dev, "invalid resource\n"); 2270 ret = -EINVAL; 2271 goto out; 2272 } 2273 2274 ndev = alloc_etherdev(sizeof(struct sh_eth_private)); 2275 if (!ndev) { 2276 ret = -ENOMEM; 2277 goto out; 2278 } 2279 2280 /* The sh Ether-specific entries in the device structure. */ 2281 ndev->base_addr = res->start; 2282 devno = pdev->id; 2283 if (devno < 0) 2284 devno = 0; 2285 2286 ndev->dma = -1; 2287 ret = platform_get_irq(pdev, 0); 2288 if (ret < 0) { 2289 ret = -ENODEV; 2290 goto out_release; 2291 } 2292 ndev->irq = ret; 2293 2294 SET_NETDEV_DEV(ndev, &pdev->dev); 2295 2296 /* Fill in the fields of the device structure with ethernet values. */ 2297 ether_setup(ndev); 2298 2299 mdp = netdev_priv(ndev); 2300 mdp->addr = ioremap(res->start, resource_size(res)); 2301 if (mdp->addr == NULL) { 2302 ret = -ENOMEM; 2303 dev_err(&pdev->dev, "ioremap failed.\n"); 2304 goto out_release; 2305 } 2306 2307 spin_lock_init(&mdp->lock); 2308 mdp->pdev = pdev; 2309 pm_runtime_enable(&pdev->dev); 2310 pm_runtime_resume(&pdev->dev); 2311 2312 pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data); 2313 /* get PHY ID */ 2314 mdp->phy_id = pd->phy; 2315 mdp->phy_interface = pd->phy_interface; 2316 /* EDMAC endian */ 2317 mdp->edmac_endian = pd->edmac_endian; 2318 mdp->no_ether_link = pd->no_ether_link; 2319 mdp->ether_link_active_low = pd->ether_link_active_low; 2320 mdp->reg_offset = sh_eth_get_register_offset(pd->register_type); 2321 2322 /* set cpu data */ 2323#if defined(SH_ETH_HAS_BOTH_MODULES) 2324 mdp->cd = sh_eth_get_cpu_data(mdp); 2325#else 2326 mdp->cd = &sh_eth_my_cpu_data; 2327#endif 2328 sh_eth_set_default_cpu_data(mdp->cd); 2329 2330 /* set function */ 2331 ndev->netdev_ops = &sh_eth_netdev_ops; 2332 SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops); 2333 ndev->watchdog_timeo = TX_TIMEOUT; 2334 2335 /* debug message level */ 2336 mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE; 2337 mdp->post_rx = POST_RX >> (devno << 1); 2338 mdp->post_fw 
= POST_FW >> (devno << 1); 2339 2340 /* read and set MAC address */ 2341 read_mac_address(ndev, pd->mac_addr); 2342 2343 /* ioremap the TSU registers */ 2344 if (mdp->cd->tsu) { 2345 struct resource *rtsu; 2346 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2347 if (!rtsu) { 2348 dev_err(&pdev->dev, "Not found TSU resource\n"); 2349 goto out_release; 2350 } 2351 mdp->tsu_addr = ioremap(rtsu->start, 2352 resource_size(rtsu)); 2353 mdp->port = devno % 2; 2354 ndev->features = NETIF_F_HW_VLAN_FILTER; 2355 } 2356 2357 /* initialize first or needed device */ 2358 if (!devno || pd->needs_init) { 2359 if (mdp->cd->chip_reset) 2360 mdp->cd->chip_reset(ndev); 2361 2362 if (mdp->cd->tsu) { 2363 /* TSU init (Init only)*/ 2364 sh_eth_tsu_init(mdp); 2365 } 2366 } 2367 2368 /* network device register */ 2369 ret = register_netdev(ndev); 2370 if (ret) 2371 goto out_release; 2372 2373 /* mdio bus init */ 2374 ret = sh_mdio_init(ndev, pdev->id, pd); 2375 if (ret) 2376 goto out_unregister; 2377 2378 /* print device information */ 2379 pr_info("Base address at 0x%x, %pM, IRQ %d.\n", 2380 (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); 2381 2382 platform_set_drvdata(pdev, ndev); 2383 2384 return ret; 2385 2386out_unregister: 2387 unregister_netdev(ndev); 2388 2389out_release: 2390 /* net_dev free */ 2391 if (mdp && mdp->addr) 2392 iounmap(mdp->addr); 2393 if (mdp && mdp->tsu_addr) 2394 iounmap(mdp->tsu_addr); 2395 if (ndev) 2396 free_netdev(ndev); 2397 2398out: 2399 return ret; 2400} 2401 2402static int sh_eth_drv_remove(struct platform_device *pdev) 2403{ 2404 struct net_device *ndev = platform_get_drvdata(pdev); 2405 struct sh_eth_private *mdp = netdev_priv(ndev); 2406 2407 if (mdp->cd->tsu) 2408 iounmap(mdp->tsu_addr); 2409 sh_mdio_release(ndev); 2410 unregister_netdev(ndev); 2411 pm_runtime_disable(&pdev->dev); 2412 iounmap(mdp->addr); 2413 free_netdev(ndev); 2414 platform_set_drvdata(pdev, NULL); 2415 2416 return 0; 2417} 2418 2419static int sh_eth_runtime_nop(struct 
device *dev)
{
	/*
	 * Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

/* No-op suspend/resume callbacks (see sh_eth_runtime_nop above). */
static struct dev_pm_ops sh_eth_dev_pm_ops = {
	.runtime_suspend = sh_eth_runtime_nop,
	.runtime_resume = sh_eth_runtime_nop,
};

static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.driver = {
		   .name = CARDNAME,
		   .pm = &sh_eth_dev_pm_ops,
	},
};

module_platform_driver(sh_eth_driver);

MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");