1/* 2 * AMD 10Gb Ethernet PHY driver 3 * 4 * This file is available to you under your choice of the following two 5 * licenses: 6 * 7 * License 1: GPLv2 8 * 9 * Copyright (c) 2014 Advanced Micro Devices, Inc. 10 * 11 * This file is free software; you may copy, redistribute and/or modify 12 * it under the terms of the GNU General Public License as published by 13 * the Free Software Foundation, either version 2 of the License, or (at 14 * your option) any later version. 15 * 16 * This file is distributed in the hope that it will be useful, but 17 * WITHOUT ANY WARRANTY; without even the implied warranty of 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 19 * General Public License for more details. 20 * 21 * You should have received a copy of the GNU General Public License 22 * along with this program. If not, see <http://www.gnu.org/licenses/>. 23 * 24 * 25 * License 2: Modified BSD 26 * 27 * Copyright (c) 2014 Advanced Micro Devices, Inc. 28 * All rights reserved. 29 * 30 * Redistribution and use in source and binary forms, with or without 31 * modification, are permitted provided that the following conditions are met: 32 * * Redistributions of source code must retain the above copyright 33 * notice, this list of conditions and the following disclaimer. 34 * * Redistributions in binary form must reproduce the above copyright 35 * notice, this list of conditions and the following disclaimer in the 36 * documentation and/or other materials provided with the distribution. 37 * * Neither the name of Advanced Micro Devices, Inc. nor the 38 * names of its contributors may be used to endorse or promote products 39 * derived from this software without specific prior written permission. 40 * 41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 42 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 43 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 44 * ARE DISCLAIMED. 
IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY 45 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 46 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 47 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 48 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 49 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 50 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 51 */ 52 53#include <linux/kernel.h> 54#include <linux/device.h> 55#include <linux/platform_device.h> 56#include <linux/string.h> 57#include <linux/errno.h> 58#include <linux/unistd.h> 59#include <linux/slab.h> 60#include <linux/interrupt.h> 61#include <linux/init.h> 62#include <linux/delay.h> 63#include <linux/netdevice.h> 64#include <linux/etherdevice.h> 65#include <linux/skbuff.h> 66#include <linux/mm.h> 67#include <linux/module.h> 68#include <linux/mii.h> 69#include <linux/ethtool.h> 70#include <linux/phy.h> 71#include <linux/mdio.h> 72#include <linux/io.h> 73#include <linux/of.h> 74#include <linux/of_platform.h> 75#include <linux/of_device.h> 76#include <linux/uaccess.h> 77 78MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>"); 79MODULE_LICENSE("Dual BSD/GPL"); 80MODULE_VERSION("1.0.0-a"); 81MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver"); 82 83#define XGBE_PHY_ID 0x000162d0 84#define XGBE_PHY_MASK 0xfffffff0 85 86#define XGBE_PHY_SPEEDSET_PROPERTY "amd,speed-set" 87 88#define XGBE_AN_INT_CMPLT 0x01 89#define XGBE_AN_INC_LINK 0x02 90#define XGBE_AN_PG_RCV 0x04 91 92#define XNP_MCF_NULL_MESSAGE 0x001 93#define XNP_ACK_PROCESSED (1 << 12) 94#define XNP_MP_FORMATTED (1 << 13) 95#define XNP_NP_EXCHANGE (1 << 15) 96 97#define XGBE_PHY_RATECHANGE_COUNT 500 98 99#ifndef MDIO_PMA_10GBR_PMD_CTRL 100#define MDIO_PMA_10GBR_PMD_CTRL 0x0096 101#endif 102 103#ifndef MDIO_PMA_10GBR_FEC_CTRL 104#define MDIO_PMA_10GBR_FEC_CTRL 0x00ab 105#endif 106 
107#ifndef MDIO_AN_XNP 108#define MDIO_AN_XNP 0x0016 109#endif 110 111#ifndef MDIO_AN_INTMASK 112#define MDIO_AN_INTMASK 0x8001 113#endif 114 115#ifndef MDIO_AN_INT 116#define MDIO_AN_INT 0x8002 117#endif 118 119#ifndef MDIO_AN_KR_CTRL 120#define MDIO_AN_KR_CTRL 0x8003 121#endif 122 123#ifndef MDIO_CTRL1_SPEED1G 124#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100) 125#endif 126 127#ifndef MDIO_KR_CTRL_PDETECT 128#define MDIO_KR_CTRL_PDETECT 0x01 129#endif 130 131/* SerDes integration register offsets */ 132#define SIR0_KR_RT_1 0x002c 133#define SIR0_STATUS 0x0040 134#define SIR1_SPEED 0x0000 135 136/* SerDes integration register entry bit positions and sizes */ 137#define SIR0_KR_RT_1_RESET_INDEX 11 138#define SIR0_KR_RT_1_RESET_WIDTH 1 139#define SIR0_STATUS_RX_READY_INDEX 0 140#define SIR0_STATUS_RX_READY_WIDTH 1 141#define SIR0_STATUS_TX_READY_INDEX 8 142#define SIR0_STATUS_TX_READY_WIDTH 1 143#define SIR1_SPEED_DATARATE_INDEX 4 144#define SIR1_SPEED_DATARATE_WIDTH 2 145#define SIR1_SPEED_PI_SPD_SEL_INDEX 12 146#define SIR1_SPEED_PI_SPD_SEL_WIDTH 4 147#define SIR1_SPEED_PLLSEL_INDEX 3 148#define SIR1_SPEED_PLLSEL_WIDTH 1 149#define SIR1_SPEED_RATECHANGE_INDEX 6 150#define SIR1_SPEED_RATECHANGE_WIDTH 1 151#define SIR1_SPEED_TXAMP_INDEX 8 152#define SIR1_SPEED_TXAMP_WIDTH 4 153#define SIR1_SPEED_WORDMODE_INDEX 0 154#define SIR1_SPEED_WORDMODE_WIDTH 3 155 156#define SPEED_10000_CDR 0x7 157#define SPEED_10000_PLL 0x1 158#define SPEED_10000_RATE 0x0 159#define SPEED_10000_TXAMP 0xa 160#define SPEED_10000_WORD 0x7 161 162#define SPEED_2500_CDR 0x2 163#define SPEED_2500_PLL 0x0 164#define SPEED_2500_RATE 0x1 165#define SPEED_2500_TXAMP 0xf 166#define SPEED_2500_WORD 0x1 167 168#define SPEED_1000_CDR 0x2 169#define SPEED_1000_PLL 0x0 170#define SPEED_1000_RATE 0x3 171#define SPEED_1000_TXAMP 0xf 172#define SPEED_1000_WORD 0x1 173 174/* SerDes RxTx register offsets */ 175#define RXTX_REG20 0x0050 176#define RXTX_REG114 0x01c8 177 178/* SerDes RxTx 
register entry bit positions and sizes */ 179#define RXTX_REG20_BLWC_ENA_INDEX 2 180#define RXTX_REG20_BLWC_ENA_WIDTH 1 181#define RXTX_REG114_PQ_REG_INDEX 9 182#define RXTX_REG114_PQ_REG_WIDTH 7 183 184#define RXTX_10000_BLWC 0 185#define RXTX_10000_PQ 0x1e 186 187#define RXTX_2500_BLWC 1 188#define RXTX_2500_PQ 0xa 189 190#define RXTX_1000_BLWC 1 191#define RXTX_1000_PQ 0xa 192 193/* Bit setting and getting macros 194 * The get macro will extract the current bit field value from within 195 * the variable 196 * 197 * The set macro will clear the current bit field value within the 198 * variable and then set the bit field of the variable to the 199 * specified value 200 */ 201#define GET_BITS(_var, _index, _width) \ 202 (((_var) >> (_index)) & ((0x1 << (_width)) - 1)) 203 204#define SET_BITS(_var, _index, _width, _val) \ 205do { \ 206 (_var) &= ~(((0x1 << (_width)) - 1) << (_index)); \ 207 (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \ 208} while (0) 209 210#define XSIR_GET_BITS(_var, _prefix, _field) \ 211 GET_BITS((_var), \ 212 _prefix##_##_field##_INDEX, \ 213 _prefix##_##_field##_WIDTH) 214 215#define XSIR_SET_BITS(_var, _prefix, _field, _val) \ 216 SET_BITS((_var), \ 217 _prefix##_##_field##_INDEX, \ 218 _prefix##_##_field##_WIDTH, (_val)) 219 220/* Macros for reading or writing SerDes integration registers 221 * The ioread macros will get bit fields or full values using the 222 * register definitions formed using the input names 223 * 224 * The iowrite macros will set bit fields or full values using the 225 * register definitions formed using the input names 226 */ 227#define XSIR0_IOREAD(_priv, _reg) \ 228 ioread16((_priv)->sir0_regs + _reg) 229 230#define XSIR0_IOREAD_BITS(_priv, _reg, _field) \ 231 GET_BITS(XSIR0_IOREAD((_priv), _reg), \ 232 _reg##_##_field##_INDEX, \ 233 _reg##_##_field##_WIDTH) 234 235#define XSIR0_IOWRITE(_priv, _reg, _val) \ 236 iowrite16((_val), (_priv)->sir0_regs + _reg) 237 238#define XSIR0_IOWRITE_BITS(_priv, _reg, 
_field, _val) \ 239do { \ 240 u16 reg_val = XSIR0_IOREAD((_priv), _reg); \ 241 SET_BITS(reg_val, \ 242 _reg##_##_field##_INDEX, \ 243 _reg##_##_field##_WIDTH, (_val)); \ 244 XSIR0_IOWRITE((_priv), _reg, reg_val); \ 245} while (0) 246 247#define XSIR1_IOREAD(_priv, _reg) \ 248 ioread16((_priv)->sir1_regs + _reg) 249 250#define XSIR1_IOREAD_BITS(_priv, _reg, _field) \ 251 GET_BITS(XSIR1_IOREAD((_priv), _reg), \ 252 _reg##_##_field##_INDEX, \ 253 _reg##_##_field##_WIDTH) 254 255#define XSIR1_IOWRITE(_priv, _reg, _val) \ 256 iowrite16((_val), (_priv)->sir1_regs + _reg) 257 258#define XSIR1_IOWRITE_BITS(_priv, _reg, _field, _val) \ 259do { \ 260 u16 reg_val = XSIR1_IOREAD((_priv), _reg); \ 261 SET_BITS(reg_val, \ 262 _reg##_##_field##_INDEX, \ 263 _reg##_##_field##_WIDTH, (_val)); \ 264 XSIR1_IOWRITE((_priv), _reg, reg_val); \ 265} while (0) 266 267/* Macros for reading or writing SerDes RxTx registers 268 * The ioread macros will get bit fields or full values using the 269 * register definitions formed using the input names 270 * 271 * The iowrite macros will set bit fields or full values using the 272 * register definitions formed using the input names 273 */ 274#define XRXTX_IOREAD(_priv, _reg) \ 275 ioread16((_priv)->rxtx_regs + _reg) 276 277#define XRXTX_IOREAD_BITS(_priv, _reg, _field) \ 278 GET_BITS(XRXTX_IOREAD((_priv), _reg), \ 279 _reg##_##_field##_INDEX, \ 280 _reg##_##_field##_WIDTH) 281 282#define XRXTX_IOWRITE(_priv, _reg, _val) \ 283 iowrite16((_val), (_priv)->rxtx_regs + _reg) 284 285#define XRXTX_IOWRITE_BITS(_priv, _reg, _field, _val) \ 286do { \ 287 u16 reg_val = XRXTX_IOREAD((_priv), _reg); \ 288 SET_BITS(reg_val, \ 289 _reg##_##_field##_INDEX, \ 290 _reg##_##_field##_WIDTH, (_val)); \ 291 XRXTX_IOWRITE((_priv), _reg, reg_val); \ 292} while (0) 293 294enum amd_xgbe_phy_an { 295 AMD_XGBE_AN_READY = 0, 296 AMD_XGBE_AN_START, 297 AMD_XGBE_AN_EVENT, 298 AMD_XGBE_AN_PAGE_RECEIVED, 299 AMD_XGBE_AN_INCOMPAT_LINK, 300 AMD_XGBE_AN_COMPLETE, 301 
AMD_XGBE_AN_NO_LINK, 302 AMD_XGBE_AN_EXIT, 303 AMD_XGBE_AN_ERROR, 304}; 305 306enum amd_xgbe_phy_rx { 307 AMD_XGBE_RX_READY = 0, 308 AMD_XGBE_RX_BPA, 309 AMD_XGBE_RX_XNP, 310 AMD_XGBE_RX_COMPLETE, 311}; 312 313enum amd_xgbe_phy_mode { 314 AMD_XGBE_MODE_KR, 315 AMD_XGBE_MODE_KX, 316}; 317 318enum amd_xgbe_phy_speedset { 319 AMD_XGBE_PHY_SPEEDSET_1000_10000, 320 AMD_XGBE_PHY_SPEEDSET_2500_10000, 321}; 322 323struct amd_xgbe_phy_priv { 324 struct platform_device *pdev; 325 struct device *dev; 326 327 struct phy_device *phydev; 328 329 /* SerDes related mmio resources */ 330 struct resource *rxtx_res; 331 struct resource *sir0_res; 332 struct resource *sir1_res; 333 334 /* SerDes related mmio registers */ 335 void __iomem *rxtx_regs; /* SerDes Rx/Tx CSRs */ 336 void __iomem *sir0_regs; /* SerDes integration registers (1/2) */ 337 void __iomem *sir1_regs; /* SerDes integration registers (2/2) */ 338 339 /* Maintain link status for re-starting auto-negotiation */ 340 unsigned int link; 341 unsigned int speed_set; 342 343 /* Auto-negotiation state machine support */ 344 struct mutex an_mutex; 345 enum amd_xgbe_phy_an an_result; 346 enum amd_xgbe_phy_an an_state; 347 enum amd_xgbe_phy_rx kr_state; 348 enum amd_xgbe_phy_rx kx_state; 349 struct work_struct an_work; 350 struct workqueue_struct *an_workqueue; 351 unsigned int parallel_detect; 352}; 353 354static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev) 355{ 356 int ret; 357 358 ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); 359 if (ret < 0) 360 return ret; 361 362 ret |= 0x02; 363 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret); 364 365 return 0; 366} 367 368static int amd_xgbe_an_disable_kr_training(struct phy_device *phydev) 369{ 370 int ret; 371 372 ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); 373 if (ret < 0) 374 return ret; 375 376 ret &= ~0x02; 377 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret); 378 379 return 
0; 380} 381 382static int amd_xgbe_phy_pcs_power_cycle(struct phy_device *phydev) 383{ 384 int ret; 385 386 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1); 387 if (ret < 0) 388 return ret; 389 390 ret |= MDIO_CTRL1_LPOWER; 391 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret); 392 393 usleep_range(75, 100); 394 395 ret &= ~MDIO_CTRL1_LPOWER; 396 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret); 397 398 return 0; 399} 400 401static void amd_xgbe_phy_serdes_start_ratechange(struct phy_device *phydev) 402{ 403 struct amd_xgbe_phy_priv *priv = phydev->priv; 404 405 /* Assert Rx and Tx ratechange */ 406 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 1); 407} 408 409static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev) 410{ 411 struct amd_xgbe_phy_priv *priv = phydev->priv; 412 unsigned int wait; 413 u16 status; 414 415 /* Release Rx and Tx ratechange */ 416 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 0); 417 418 /* Wait for Rx and Tx ready */ 419 wait = XGBE_PHY_RATECHANGE_COUNT; 420 while (wait--) { 421 usleep_range(50, 75); 422 423 status = XSIR0_IOREAD(priv, SIR0_STATUS); 424 if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) && 425 XSIR_GET_BITS(status, SIR0_STATUS, TX_READY)) 426 return; 427 } 428 429 netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n", 430 status); 431} 432 433static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev) 434{ 435 struct amd_xgbe_phy_priv *priv = phydev->priv; 436 int ret; 437 438 /* Enable KR training */ 439 ret = amd_xgbe_an_enable_kr_training(phydev); 440 if (ret < 0) 441 return ret; 442 443 /* Set PCS to KR/10G speed */ 444 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2); 445 if (ret < 0) 446 return ret; 447 448 ret &= ~MDIO_PCS_CTRL2_TYPE; 449 ret |= MDIO_PCS_CTRL2_10GBR; 450 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret); 451 452 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1); 453 if (ret < 0) 454 return ret; 455 456 ret &= ~MDIO_CTRL1_SPEEDSEL; 457 
ret |= MDIO_CTRL1_SPEED10G; 458 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret); 459 460 ret = amd_xgbe_phy_pcs_power_cycle(phydev); 461 if (ret < 0) 462 return ret; 463 464 /* Set SerDes to 10G speed */ 465 amd_xgbe_phy_serdes_start_ratechange(phydev); 466 467 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_10000_RATE); 468 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_10000_WORD); 469 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_10000_TXAMP); 470 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_10000_PLL); 471 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_10000_CDR); 472 473 XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_10000_BLWC); 474 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_10000_PQ); 475 476 amd_xgbe_phy_serdes_complete_ratechange(phydev); 477 478 return 0; 479} 480 481static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev) 482{ 483 struct amd_xgbe_phy_priv *priv = phydev->priv; 484 int ret; 485 486 /* Disable KR training */ 487 ret = amd_xgbe_an_disable_kr_training(phydev); 488 if (ret < 0) 489 return ret; 490 491 /* Set PCS to KX/1G speed */ 492 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2); 493 if (ret < 0) 494 return ret; 495 496 ret &= ~MDIO_PCS_CTRL2_TYPE; 497 ret |= MDIO_PCS_CTRL2_10GBX; 498 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret); 499 500 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1); 501 if (ret < 0) 502 return ret; 503 504 ret &= ~MDIO_CTRL1_SPEEDSEL; 505 ret |= MDIO_CTRL1_SPEED1G; 506 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret); 507 508 ret = amd_xgbe_phy_pcs_power_cycle(phydev); 509 if (ret < 0) 510 return ret; 511 512 /* Set SerDes to 2.5G speed */ 513 amd_xgbe_phy_serdes_start_ratechange(phydev); 514 515 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_2500_RATE); 516 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_2500_WORD); 517 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_2500_TXAMP); 518 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, 
SPEED_2500_PLL); 519 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_2500_CDR); 520 521 XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_2500_BLWC); 522 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_2500_PQ); 523 524 amd_xgbe_phy_serdes_complete_ratechange(phydev); 525 526 return 0; 527} 528 529static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev) 530{ 531 struct amd_xgbe_phy_priv *priv = phydev->priv; 532 int ret; 533 534 /* Disable KR training */ 535 ret = amd_xgbe_an_disable_kr_training(phydev); 536 if (ret < 0) 537 return ret; 538 539 /* Set PCS to KX/1G speed */ 540 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2); 541 if (ret < 0) 542 return ret; 543 544 ret &= ~MDIO_PCS_CTRL2_TYPE; 545 ret |= MDIO_PCS_CTRL2_10GBX; 546 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret); 547 548 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1); 549 if (ret < 0) 550 return ret; 551 552 ret &= ~MDIO_CTRL1_SPEEDSEL; 553 ret |= MDIO_CTRL1_SPEED1G; 554 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret); 555 556 ret = amd_xgbe_phy_pcs_power_cycle(phydev); 557 if (ret < 0) 558 return ret; 559 560 /* Set SerDes to 1G speed */ 561 amd_xgbe_phy_serdes_start_ratechange(phydev); 562 563 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_1000_RATE); 564 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_1000_WORD); 565 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP, SPEED_1000_TXAMP); 566 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_1000_PLL); 567 XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PI_SPD_SEL, SPEED_1000_CDR); 568 569 XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA, RXTX_1000_BLWC); 570 XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, RXTX_1000_PQ); 571 572 amd_xgbe_phy_serdes_complete_ratechange(phydev); 573 574 return 0; 575} 576 577static int amd_xgbe_phy_cur_mode(struct phy_device *phydev, 578 enum amd_xgbe_phy_mode *mode) 579{ 580 int ret; 581 582 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2); 583 if (ret < 0) 584 return ret; 585 586 if 
((ret & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR) 587 *mode = AMD_XGBE_MODE_KR; 588 else 589 *mode = AMD_XGBE_MODE_KX; 590 591 return 0; 592} 593 594static bool amd_xgbe_phy_in_kr_mode(struct phy_device *phydev) 595{ 596 enum amd_xgbe_phy_mode mode; 597 598 if (amd_xgbe_phy_cur_mode(phydev, &mode)) 599 return false; 600 601 return (mode == AMD_XGBE_MODE_KR); 602} 603 604static int amd_xgbe_phy_switch_mode(struct phy_device *phydev) 605{ 606 struct amd_xgbe_phy_priv *priv = phydev->priv; 607 int ret; 608 609 /* If we are in KR switch to KX, and vice-versa */ 610 if (amd_xgbe_phy_in_kr_mode(phydev)) { 611 if (priv->speed_set == AMD_XGBE_PHY_SPEEDSET_1000_10000) 612 ret = amd_xgbe_phy_gmii_mode(phydev); 613 else 614 ret = amd_xgbe_phy_gmii_2500_mode(phydev); 615 } else { 616 ret = amd_xgbe_phy_xgmii_mode(phydev); 617 } 618 619 return ret; 620} 621 622static int amd_xgbe_phy_set_mode(struct phy_device *phydev, 623 enum amd_xgbe_phy_mode mode) 624{ 625 enum amd_xgbe_phy_mode cur_mode; 626 int ret; 627 628 ret = amd_xgbe_phy_cur_mode(phydev, &cur_mode); 629 if (ret) 630 return ret; 631 632 if (mode != cur_mode) 633 ret = amd_xgbe_phy_switch_mode(phydev); 634 635 return ret; 636} 637 638static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev, 639 enum amd_xgbe_phy_rx *state) 640{ 641 struct amd_xgbe_phy_priv *priv = phydev->priv; 642 int ad_reg, lp_reg, ret; 643 644 *state = AMD_XGBE_RX_COMPLETE; 645 646 /* If we're not in KR mode then we're done */ 647 if (!amd_xgbe_phy_in_kr_mode(phydev)) 648 return AMD_XGBE_AN_EVENT; 649 650 /* Enable/Disable FEC */ 651 ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); 652 if (ad_reg < 0) 653 return AMD_XGBE_AN_ERROR; 654 655 lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 2); 656 if (lp_reg < 0) 657 return AMD_XGBE_AN_ERROR; 658 659 ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL); 660 if (ret < 0) 661 return AMD_XGBE_AN_ERROR; 662 663 if ((ad_reg & 0xc000) && 
(lp_reg & 0xc000)) 664 ret |= 0x01; 665 else 666 ret &= ~0x01; 667 668 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL, ret); 669 670 /* Start KR training */ 671 ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); 672 if (ret < 0) 673 return AMD_XGBE_AN_ERROR; 674 675 XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 1); 676 677 ret |= 0x01; 678 phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret); 679 680 XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 0); 681 682 return AMD_XGBE_AN_EVENT; 683} 684 685static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev, 686 enum amd_xgbe_phy_rx *state) 687{ 688 u16 msg; 689 690 *state = AMD_XGBE_RX_XNP; 691 692 msg = XNP_MCF_NULL_MESSAGE; 693 msg |= XNP_MP_FORMATTED; 694 695 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0); 696 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0); 697 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP, msg); 698 699 return AMD_XGBE_AN_EVENT; 700} 701 702static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev, 703 enum amd_xgbe_phy_rx *state) 704{ 705 unsigned int link_support; 706 int ret, ad_reg, lp_reg; 707 708 /* Read Base Ability register 2 first */ 709 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1); 710 if (ret < 0) 711 return AMD_XGBE_AN_ERROR; 712 713 /* Check for a supported mode, otherwise restart in a different one */ 714 link_support = amd_xgbe_phy_in_kr_mode(phydev) ? 0x80 : 0x20; 715 if (!(ret & link_support)) 716 return AMD_XGBE_AN_INCOMPAT_LINK; 717 718 /* Check Extended Next Page support */ 719 ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE); 720 if (ad_reg < 0) 721 return AMD_XGBE_AN_ERROR; 722 723 lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA); 724 if (lp_reg < 0) 725 return AMD_XGBE_AN_ERROR; 726 727 return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ? 
728 amd_xgbe_an_tx_xnp(phydev, state) : 729 amd_xgbe_an_tx_training(phydev, state); 730} 731 732static enum amd_xgbe_phy_an amd_xgbe_an_rx_xnp(struct phy_device *phydev, 733 enum amd_xgbe_phy_rx *state) 734{ 735 int ad_reg, lp_reg; 736 737 /* Check Extended Next Page support */ 738 ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE); 739 if (ad_reg < 0) 740 return AMD_XGBE_AN_ERROR; 741 742 lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA); 743 if (lp_reg < 0) 744 return AMD_XGBE_AN_ERROR; 745 746 return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ? 747 amd_xgbe_an_tx_xnp(phydev, state) : 748 amd_xgbe_an_tx_training(phydev, state); 749} 750 751static enum amd_xgbe_phy_an amd_xgbe_an_start(struct phy_device *phydev) 752{ 753 struct amd_xgbe_phy_priv *priv = phydev->priv; 754 int ret; 755 756 /* Be sure we aren't looping trying to negotiate */ 757 if (amd_xgbe_phy_in_kr_mode(phydev)) { 758 if (priv->kr_state != AMD_XGBE_RX_READY) 759 return AMD_XGBE_AN_NO_LINK; 760 priv->kr_state = AMD_XGBE_RX_BPA; 761 } else { 762 if (priv->kx_state != AMD_XGBE_RX_READY) 763 return AMD_XGBE_AN_NO_LINK; 764 priv->kx_state = AMD_XGBE_RX_BPA; 765 } 766 767 /* Set up Advertisement register 3 first */ 768 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); 769 if (ret < 0) 770 return AMD_XGBE_AN_ERROR; 771 772 if (phydev->supported & SUPPORTED_10000baseR_FEC) 773 ret |= 0xc000; 774 else 775 ret &= ~0xc000; 776 777 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, ret); 778 779 /* Set up Advertisement register 2 next */ 780 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1); 781 if (ret < 0) 782 return AMD_XGBE_AN_ERROR; 783 784 if (phydev->supported & SUPPORTED_10000baseKR_Full) 785 ret |= 0x80; 786 else 787 ret &= ~0x80; 788 789 if ((phydev->supported & SUPPORTED_1000baseKX_Full) || 790 (phydev->supported & SUPPORTED_2500baseX_Full)) 791 ret |= 0x20; 792 else 793 ret &= ~0x20; 794 795 phy_write_mmd(phydev, MDIO_MMD_AN, 
MDIO_AN_ADVERTISE + 1, ret); 796 797 /* Set up Advertisement register 1 last */ 798 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE); 799 if (ret < 0) 800 return AMD_XGBE_AN_ERROR; 801 802 if (phydev->supported & SUPPORTED_Pause) 803 ret |= 0x400; 804 else 805 ret &= ~0x400; 806 807 if (phydev->supported & SUPPORTED_Asym_Pause) 808 ret |= 0x800; 809 else 810 ret &= ~0x800; 811 812 /* We don't intend to perform XNP */ 813 ret &= ~XNP_NP_EXCHANGE; 814 815 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ret); 816 817 /* Enable and start auto-negotiation */ 818 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0); 819 820 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_KR_CTRL); 821 if (ret < 0) 822 return AMD_XGBE_AN_ERROR; 823 824 ret |= MDIO_KR_CTRL_PDETECT; 825 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_KR_CTRL, ret); 826 827 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1); 828 if (ret < 0) 829 return AMD_XGBE_AN_ERROR; 830 831 ret |= MDIO_AN_CTRL1_ENABLE; 832 ret |= MDIO_AN_CTRL1_RESTART; 833 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret); 834 835 return AMD_XGBE_AN_EVENT; 836} 837 838static enum amd_xgbe_phy_an amd_xgbe_an_event(struct phy_device *phydev) 839{ 840 enum amd_xgbe_phy_an new_state; 841 int ret; 842 843 ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT); 844 if (ret < 0) 845 return AMD_XGBE_AN_ERROR; 846 847 new_state = AMD_XGBE_AN_EVENT; 848 if (ret & XGBE_AN_PG_RCV) 849 new_state = AMD_XGBE_AN_PAGE_RECEIVED; 850 else if (ret & XGBE_AN_INC_LINK) 851 new_state = AMD_XGBE_AN_INCOMPAT_LINK; 852 else if (ret & XGBE_AN_INT_CMPLT) 853 new_state = AMD_XGBE_AN_COMPLETE; 854 855 if (new_state != AMD_XGBE_AN_EVENT) 856 phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0); 857 858 return new_state; 859} 860 861static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev) 862{ 863 struct amd_xgbe_phy_priv *priv = phydev->priv; 864 enum amd_xgbe_phy_rx *state; 865 int ret; 866 867 state = amd_xgbe_phy_in_kr_mode(phydev) 
? &priv->kr_state 868 : &priv->kx_state; 869 870 switch (*state) { 871 case AMD_XGBE_RX_BPA: 872 ret = amd_xgbe_an_rx_bpa(phydev, state); 873 break; 874 875 case AMD_XGBE_RX_XNP: 876 ret = amd_xgbe_an_rx_xnp(phydev, state); 877 break; 878 879 default: 880 ret = AMD_XGBE_AN_ERROR; 881 } 882 883 return ret; 884} 885 886static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev) 887{ 888 int ret; 889 890 ret = amd_xgbe_phy_switch_mode(phydev); 891 if (ret) 892 return AMD_XGBE_AN_ERROR; 893 894 return AMD_XGBE_AN_START; 895} 896 897static void amd_xgbe_an_state_machine(struct work_struct *work) 898{ 899 struct amd_xgbe_phy_priv *priv = container_of(work, 900 struct amd_xgbe_phy_priv, 901 an_work); 902 struct phy_device *phydev = priv->phydev; 903 enum amd_xgbe_phy_an cur_state; 904 int sleep; 905 unsigned int an_supported = 0; 906 907 /* Start in KX mode */ 908 if (amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX)) 909 priv->an_state = AMD_XGBE_AN_ERROR; 910 911 while (1) { 912 mutex_lock(&priv->an_mutex); 913 914 cur_state = priv->an_state; 915 916 switch (priv->an_state) { 917 case AMD_XGBE_AN_START: 918 an_supported = 0; 919 priv->parallel_detect = 0; 920 priv->an_state = amd_xgbe_an_start(phydev); 921 break; 922 923 case AMD_XGBE_AN_EVENT: 924 priv->an_state = amd_xgbe_an_event(phydev); 925 break; 926 927 case AMD_XGBE_AN_PAGE_RECEIVED: 928 priv->an_state = amd_xgbe_an_page_received(phydev); 929 an_supported++; 930 break; 931 932 case AMD_XGBE_AN_INCOMPAT_LINK: 933 priv->an_state = amd_xgbe_an_incompat_link(phydev); 934 break; 935 936 case AMD_XGBE_AN_COMPLETE: 937 priv->parallel_detect = an_supported ? 0 : 1; 938 netdev_info(phydev->attached_dev, "%s successful\n", 939 an_supported ? 
"Auto negotiation" 940 : "Parallel detection"); 941 /* fall through */ 942 943 case AMD_XGBE_AN_NO_LINK: 944 case AMD_XGBE_AN_EXIT: 945 goto exit_unlock; 946 947 default: 948 priv->an_state = AMD_XGBE_AN_ERROR; 949 } 950 951 if (priv->an_state == AMD_XGBE_AN_ERROR) { 952 netdev_err(phydev->attached_dev, 953 "error during auto-negotiation, state=%u\n", 954 cur_state); 955 goto exit_unlock; 956 } 957 958 sleep = (priv->an_state == AMD_XGBE_AN_EVENT) ? 1 : 0; 959 960 mutex_unlock(&priv->an_mutex); 961 962 if (sleep) 963 usleep_range(20, 50); 964 } 965 966exit_unlock: 967 priv->an_result = priv->an_state; 968 priv->an_state = AMD_XGBE_AN_READY; 969 970 mutex_unlock(&priv->an_mutex); 971} 972 973static int amd_xgbe_phy_soft_reset(struct phy_device *phydev) 974{ 975 int count, ret; 976 977 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1); 978 if (ret < 0) 979 return ret; 980 981 ret |= MDIO_CTRL1_RESET; 982 phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret); 983 984 count = 50; 985 do { 986 msleep(20); 987 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1); 988 if (ret < 0) 989 return ret; 990 } while ((ret & MDIO_CTRL1_RESET) && --count); 991 992 if (ret & MDIO_CTRL1_RESET) 993 return -ETIMEDOUT; 994 995 return 0; 996} 997 998static int amd_xgbe_phy_config_init(struct phy_device *phydev) 999{ 1000 struct amd_xgbe_phy_priv *priv = phydev->priv; 1001 1002 /* Initialize supported features */ 1003 phydev->supported = SUPPORTED_Autoneg; 1004 phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; 1005 phydev->supported |= SUPPORTED_Backplane; 1006 phydev->supported |= SUPPORTED_10000baseKR_Full | 1007 SUPPORTED_10000baseR_FEC; 1008 switch (priv->speed_set) { 1009 case AMD_XGBE_PHY_SPEEDSET_1000_10000: 1010 phydev->supported |= SUPPORTED_1000baseKX_Full; 1011 break; 1012 case AMD_XGBE_PHY_SPEEDSET_2500_10000: 1013 phydev->supported |= SUPPORTED_2500baseX_Full; 1014 break; 1015 } 1016 phydev->advertising = phydev->supported; 1017 1018 /* Turn off and clear 
interrupts */
	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);

	return 0;
}

/* Force the PHY to a fixed speed with auto-negotiation disabled.
 *
 * Clears the AN enable bit in the AN MMD control register, then programs
 * the serdes/PCS for the speed requested in phydev->speed.  Only full
 * duplex is supported; any other duplex setting is rejected.
 *
 * Returns 0 on success, -EINVAL for an unsupported speed/duplex, or a
 * negative MDIO error code.
 */
static int amd_xgbe_phy_setup_forced(struct phy_device *phydev)
{
	int ret;

	/* Disable auto-negotiation */
	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_AN_CTRL1_ENABLE;
	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);

	/* Validate/Set specified speed */
	switch (phydev->speed) {
	case SPEED_10000:
		ret = amd_xgbe_phy_xgmii_mode(phydev);
		break;

	case SPEED_2500:
		ret = amd_xgbe_phy_gmii_2500_mode(phydev);
		break;

	case SPEED_1000:
		ret = amd_xgbe_phy_gmii_mode(phydev);
		break;

	default:
		ret = -EINVAL;
	}

	if (ret < 0)
		return ret;

	/* Validate duplex mode */
	if (phydev->duplex != DUPLEX_FULL)
		return -EINVAL;

	phydev->pause = 0;
	phydev->asym_pause = 0;

	return 0;
}

/* phylib config_aneg callback.
 *
 * In forced mode, delegates to amd_xgbe_phy_setup_forced().  Otherwise
 * resets the software auto-negotiation state under an_mutex and kicks the
 * AN state machine worker (amd_xgbe_an_state_machine) on the driver's
 * dedicated workqueue.  AN itself completes asynchronously; completion is
 * reported later through amd_xgbe_phy_aneg_done().
 */
static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	u32 mmd_mask = phydev->c45_ids.devices_in_package;

	if (phydev->autoneg != AUTONEG_ENABLE)
		return amd_xgbe_phy_setup_forced(phydev);

	/* Make sure we have the AN MMD present */
	if (!(mmd_mask & MDIO_DEVS_AN))
		return -EINVAL;

	/* Start/Restart the auto-negotiation state machine */
	mutex_lock(&priv->an_mutex);
	priv->an_result = AMD_XGBE_AN_READY;
	priv->an_state = AMD_XGBE_AN_START;
	priv->kr_state = AMD_XGBE_RX_READY;
	priv->kx_state = AMD_XGBE_RX_READY;
	mutex_unlock(&priv->an_mutex);

	queue_work(priv->an_workqueue, &priv->an_work);

	return 0;
}

/* phylib aneg_done callback: report whether the asynchronous AN state
 * machine has finished.  Reads the worker's result under an_mutex; returns
 * non-zero only once the result is AMD_XGBE_AN_COMPLETE.
 */
static int amd_xgbe_phy_aneg_done(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	enum amd_xgbe_phy_an state;

	mutex_lock(&priv->an_mutex);
	state = priv->an_result;
	mutex_unlock(&priv->an_mutex);

	return (state == AMD_XGBE_AN_COMPLETE);
}

/* Refresh phydev->link from the PCS status register.
 *
 * While the AN state machine is running (an_state != AMD_XGBE_AN_READY)
 * the link is reported as up so phylib does not tear it down mid-AN.
 * On a down->up transition auto-negotiation is restarted.
 *
 * Returns 0 on success or a negative MDIO/config error code.
 */
static int amd_xgbe_phy_update_link(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	enum amd_xgbe_phy_an state;
	unsigned int check_again, autoneg;
	int ret;

	/* If we're doing auto-negotiation don't report link down */
	mutex_lock(&priv->an_mutex);
	state = priv->an_state;
	mutex_unlock(&priv->an_mutex);

	if (state != AMD_XGBE_AN_READY) {
		phydev->link = 1;
		return 0;
	}

	/* Since the device can be in the wrong mode when a link is
	 * (re-)established (cable connected after the interface is
	 * up, etc.), the link status may report no link. If there
	 * is no link, try switching modes and checking the status
	 * again if auto negotiation is enabled.
	 */
	check_again = (phydev->autoneg == AUTONEG_ENABLE) ? 1 : 0;
again:
	/* Link status is latched low, so read once to clear
	 * and then read again to get current state
	 */
	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
	if (ret < 0)
		return ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
	if (ret < 0)
		return ret;

	phydev->link = (ret & MDIO_STAT1_LSTATUS) ? 1 : 0;

	if (!phydev->link) {
		if (check_again) {
			/* One retry only: flip KR/KX mode and re-read */
			ret = amd_xgbe_phy_switch_mode(phydev);
			if (ret < 0)
				return ret;
			check_again = 0;
			goto again;
		}
	}

	/* "autoneg" here is a flag meaning "link just came (back) up,
	 * so AN must be restarted" -- priv->link caches the last state
	 */
	autoneg = (phydev->link && !priv->link) ? 1 : 0;
	priv->link = phydev->link;
	if (autoneg) {
		/* Link is (back) up, re-start auto-negotiation */
		ret = amd_xgbe_phy_config_aneg(phydev);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* phylib read_status callback.
 *
 * Updates the link state, then resolves speed/duplex/pause:
 *  - With AN enabled (and no parallel detection), intersects the local
 *    advertisement with the link partner ability registers and programs
 *    the matching KR/KX mode.
 *  - Otherwise, derives the speed from the currently configured mode and
 *    the platform speed-set selection.
 *
 * Returns 0 on success or a negative MDIO/config error code.
 */
static int amd_xgbe_phy_read_status(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	u32 mmd_mask = phydev->c45_ids.devices_in_package;
	int ret, ad_ret, lp_ret;

	ret = amd_xgbe_phy_update_link(phydev);
	if (ret)
		return ret;

	if ((phydev->autoneg == AUTONEG_ENABLE) &&
	    !priv->parallel_detect) {
		if (!(mmd_mask & MDIO_DEVS_AN))
			return -EINVAL;

		/* AN still in progress: leave previous results untouched */
		if (!amd_xgbe_phy_aneg_done(phydev))
			return 0;

		/* Compare Advertisement and Link Partner register 1 */
		ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
		if (ad_ret < 0)
			return ad_ret;
		lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
		if (lp_ret < 0)
			return lp_ret;

		/* NOTE(review): 0x400/0x800 look like the pause/asym-pause
		 * ability bits of the clause 73 base page -- confirm against
		 * IEEE 802.3 Annex 73A
		 */
		ad_ret &= lp_ret;
		phydev->pause = (ad_ret & 0x400) ? 1 : 0;
		phydev->asym_pause = (ad_ret & 0x800) ? 1 : 0;

		/* Compare Advertisement and Link Partner register 2 */
		ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN,
				      MDIO_AN_ADVERTISE + 1);
		if (ad_ret < 0)
			return ad_ret;
		lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
		if (lp_ret < 0)
			return lp_ret;

		ad_ret &= lp_ret;
		/* NOTE(review): 0x80 is presumably the 10GBASE-KR
		 * technology ability bit -- verify
		 */
		if (ad_ret & 0x80) {
			phydev->speed = SPEED_10000;
			ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
			if (ret)
				return ret;
		} else {
			/* KX mode: actual rate depends on the board's
			 * speed-set strapping read at probe time
			 */
			switch (priv->speed_set) {
			case AMD_XGBE_PHY_SPEEDSET_1000_10000:
				phydev->speed = SPEED_1000;
				break;

			case AMD_XGBE_PHY_SPEEDSET_2500_10000:
				phydev->speed = SPEED_2500;
				break;
			}

			ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
			if (ret)
				return ret;
		}

		phydev->duplex = DUPLEX_FULL;
	} else {
		/* Forced mode / parallel detect: report the configured mode */
		if (amd_xgbe_phy_in_kr_mode(phydev)) {
			phydev->speed = SPEED_10000;
		} else {
			switch (priv->speed_set) {
			case AMD_XGBE_PHY_SPEEDSET_1000_10000:
				phydev->speed = SPEED_1000;
				break;

			case AMD_XGBE_PHY_SPEEDSET_2500_10000:
				phydev->speed = SPEED_2500;
				break;
			}
		}
		phydev->duplex = DUPLEX_FULL;
		phydev->pause = 0;
		phydev->asym_pause = 0;
	}

	return 0;
}

/* phylib suspend callback: place the PHY in low-power mode by setting
 * the LPOWER bit in the PCS control register.  phydev->lock serializes
 * against other phylib accesses.
 */
static int amd_xgbe_phy_suspend(struct phy_device *phydev)
{
	int ret;

	mutex_lock(&phydev->lock);

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		goto unlock;

	ret |= MDIO_CTRL1_LPOWER;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	ret = 0;

unlock:
	mutex_unlock(&phydev->lock);

	return ret;
}

/* phylib resume callback: clear the LPOWER bit set by suspend to bring
 * the PHY out of low-power mode.
 */
static int amd_xgbe_phy_resume(struct phy_device *phydev)
{
	int ret;

	mutex_lock(&phydev->lock);

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		goto unlock;

	ret &= ~MDIO_CTRL1_LPOWER;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	ret = 0;

unlock:
	mutex_unlock(&phydev->lock);

	return ret;
}

/* phylib probe callback.
 *
 * Locates the platform device behind the PHY's OF node, maps the three
 * serdes register regions (rxtx, sir0, sir1), reads the "amd,speed-set"
 * property selecting the 1G/10G vs 2.5G/10G rate pair, and creates the
 * single-threaded workqueue that runs the AN state machine.
 *
 * NOTE(review): the devm_* mappings are charged to the *platform* device,
 * not the PHY device, which is presumably why the error paths (and
 * remove) release them explicitly instead of relying on devres teardown
 * -- confirm against the platform/PHY lifetime rules.
 *
 * Returns 0 on success or a negative errno.
 */
static int amd_xgbe_phy_probe(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv;
	struct platform_device *pdev;
	struct device *dev;
	char *wq_name;
	const __be32 *property;
	unsigned int speed_set;
	int ret;

	if (!phydev->dev.of_node)
		return -EINVAL;

	/* Takes a reference on pdev; dropped via of_dev_put() below */
	pdev = of_find_device_by_node(phydev->dev.of_node);
	if (!pdev)
		return -EINVAL;
	dev = &pdev->dev;

	/* Per-bus workqueue name; freed on both success and error paths */
	wq_name = kasprintf(GFP_KERNEL, "%s-amd-xgbe-phy", phydev->bus->name);
	if (!wq_name) {
		ret = -ENOMEM;
		goto err_pdev;
	}

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto err_name;
	}

	priv->pdev = pdev;
	priv->dev = dev;
	priv->phydev = phydev;

	/* Get the device mmio areas */
	priv->rxtx_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->rxtx_regs = devm_ioremap_resource(dev, priv->rxtx_res);
	if (IS_ERR(priv->rxtx_regs)) {
		dev_err(dev, "rxtx ioremap failed\n");
		ret = PTR_ERR(priv->rxtx_regs);
		goto err_priv;
	}

	priv->sir0_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	priv->sir0_regs = devm_ioremap_resource(dev, priv->sir0_res);
	if (IS_ERR(priv->sir0_regs)) {
		dev_err(dev, "sir0 ioremap failed\n");
		ret = PTR_ERR(priv->sir0_regs);
		goto err_rxtx;
	}

	priv->sir1_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	priv->sir1_regs = devm_ioremap_resource(dev, priv->sir1_res);
	if (IS_ERR(priv->sir1_regs)) {
		dev_err(dev, "sir1 ioremap failed\n");
		ret = PTR_ERR(priv->sir1_regs);
		goto err_sir0;
	}

	/* Get the device speed set property (default 0 = 1G/10G) */
	speed_set = 0;
	property = of_get_property(dev->of_node, XGBE_PHY_SPEEDSET_PROPERTY,
				   NULL);
	if (property)
		speed_set = be32_to_cpu(*property);

	switch (speed_set) {
	case 0:
		priv->speed_set = AMD_XGBE_PHY_SPEEDSET_1000_10000;
		break;
	case 1:
		priv->speed_set = AMD_XGBE_PHY_SPEEDSET_2500_10000;
		break;
	default:
		dev_err(dev, "invalid amd,speed-set property\n");
		ret = -EINVAL;
		goto err_sir1;
	}

	priv->link = 1;

	mutex_init(&priv->an_mutex);
	INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine);
	priv->an_workqueue = create_singlethread_workqueue(wq_name);
	if (!priv->an_workqueue) {
		ret = -ENOMEM;
		goto err_sir1;
	}

	phydev->priv = priv;

	kfree(wq_name);
	of_dev_put(pdev);

	return 0;

	/* Error unwind: release in reverse order of acquisition */
err_sir1:
	devm_iounmap(dev, priv->sir1_regs);
	devm_release_mem_region(dev, priv->sir1_res->start,
				resource_size(priv->sir1_res));

err_sir0:
	devm_iounmap(dev, priv->sir0_regs);
	devm_release_mem_region(dev, priv->sir0_res->start,
				resource_size(priv->sir0_res));

err_rxtx:
	devm_iounmap(dev, priv->rxtx_regs);
	devm_release_mem_region(dev, priv->rxtx_res->start,
				resource_size(priv->rxtx_res));

err_priv:
	devm_kfree(dev, priv);

err_name:
	kfree(wq_name);

err_pdev:
	of_dev_put(pdev);

	return ret;
}

/* phylib remove callback: stop any in-flight auto-negotiation (the
 * worker observes AMD_XGBE_AN_EXIT and bails out), flush and destroy the
 * AN workqueue, then release the register regions and private data
 * acquired in probe.
 */
static void amd_xgbe_phy_remove(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	struct device *dev = priv->dev;

	/* Stop any in process auto-negotiation */
	mutex_lock(&priv->an_mutex);
	priv->an_state = AMD_XGBE_AN_EXIT;
	mutex_unlock(&priv->an_mutex);

	flush_workqueue(priv->an_workqueue);
	destroy_workqueue(priv->an_workqueue);

	/* Release resources */
	devm_iounmap(dev, priv->sir1_regs);
	devm_release_mem_region(dev, priv->sir1_res->start,
				resource_size(priv->sir1_res));

	devm_iounmap(dev, priv->sir0_regs);
	devm_release_mem_region(dev, priv->sir0_res->start,
				resource_size(priv->sir0_res));

	devm_iounmap(dev, priv->rxtx_regs);
	devm_release_mem_region(dev, priv->rxtx_res->start,
				resource_size(priv->rxtx_res));

	devm_kfree(dev, priv);
}

/* Custom device matcher: exact compare of the PCS MMD device ID
 * (no mask applied, unlike the phy_id/phy_id_mask table match).
 */
static int amd_xgbe_match_phy_device(struct phy_device *phydev)
{
	return phydev->c45_ids.device_ids[MDIO_MMD_PCS] == XGBE_PHY_ID;
}

/* PHY driver registration table for the AMD XGBE PHY */
static struct phy_driver amd_xgbe_phy_driver[] = {
	{
		.phy_id			= XGBE_PHY_ID,
		.phy_id_mask		= XGBE_PHY_MASK,
		.name			= "AMD XGBE PHY",
		.features		= 0,
		.probe			= amd_xgbe_phy_probe,
		.remove			= amd_xgbe_phy_remove,
		.soft_reset		= amd_xgbe_phy_soft_reset,
		.config_init		= amd_xgbe_phy_config_init,
		.suspend		= amd_xgbe_phy_suspend,
		.resume			= amd_xgbe_phy_resume,
		.config_aneg		= amd_xgbe_phy_config_aneg,
		.aneg_done		= amd_xgbe_phy_aneg_done,
		.read_status		= amd_xgbe_phy_read_status,
		.match_phy_device	= amd_xgbe_match_phy_device,
		.driver			= {
			.owner = THIS_MODULE,
		},
	},
};

/* Module init: register the PHY driver(s) with phylib */
static int __init amd_xgbe_phy_init(void)
{
	return phy_drivers_register(amd_xgbe_phy_driver,
				    ARRAY_SIZE(amd_xgbe_phy_driver));
}

/* Module exit: unregister the PHY driver(s) */
static void __exit amd_xgbe_phy_exit(void)
{
	phy_drivers_unregister(amd_xgbe_phy_driver,
			       ARRAY_SIZE(amd_xgbe_phy_driver));
}

module_init(amd_xgbe_phy_init);
module_exit(amd_xgbe_phy_exit);

/* MDIO device ID table for module autoloading */
static struct mdio_device_id __maybe_unused amd_xgbe_phy_ids[] = {
	{ XGBE_PHY_ID, XGBE_PHY_MASK },
	{ }
};
MODULE_DEVICE_TABLE(mdio, amd_xgbe_phy_ids);