/* bfin_can.c revision e9dcd1613f0ac0b3573b7d813a2c5672cd8302eb */
1/* 2 * Blackfin On-Chip CAN Driver 3 * 4 * Copyright 2004-2009 Analog Devices Inc. 5 * 6 * Enter bugs at http://blackfin.uclinux.org/ 7 * 8 * Licensed under the GPL-2 or later. 9 */ 10 11#include <linux/module.h> 12#include <linux/init.h> 13#include <linux/kernel.h> 14#include <linux/bitops.h> 15#include <linux/interrupt.h> 16#include <linux/errno.h> 17#include <linux/netdevice.h> 18#include <linux/skbuff.h> 19#include <linux/platform_device.h> 20 21#include <linux/can.h> 22#include <linux/can/dev.h> 23#include <linux/can/error.h> 24 25#include <asm/portmux.h> 26 27#define DRV_NAME "bfin_can" 28#define BFIN_CAN_TIMEOUT 100 29#define TX_ECHO_SKB_MAX 1 30 31/* 32 * transmit and receive channels 33 */ 34#define TRANSMIT_CHL 24 35#define RECEIVE_STD_CHL 0 36#define RECEIVE_EXT_CHL 4 37#define RECEIVE_RTR_CHL 8 38#define RECEIVE_EXT_RTR_CHL 12 39#define MAX_CHL_NUMBER 32 40 41/* 42 * bfin can registers layout 43 */ 44struct bfin_can_mask_regs { 45 u16 aml; 46 u16 dummy1; 47 u16 amh; 48 u16 dummy2; 49}; 50 51struct bfin_can_channel_regs { 52 u16 data[8]; 53 u16 dlc; 54 u16 dummy1; 55 u16 tsv; 56 u16 dummy2; 57 u16 id0; 58 u16 dummy3; 59 u16 id1; 60 u16 dummy4; 61}; 62 63struct bfin_can_regs { 64 /* 65 * global control and status registers 66 */ 67 u16 mc1; /* offset 0 */ 68 u16 dummy1; 69 u16 md1; /* offset 4 */ 70 u16 rsv1[13]; 71 u16 mbtif1; /* offset 0x20 */ 72 u16 dummy2; 73 u16 mbrif1; /* offset 0x24 */ 74 u16 dummy3; 75 u16 mbim1; /* offset 0x28 */ 76 u16 rsv2[11]; 77 u16 mc2; /* offset 0x40 */ 78 u16 dummy4; 79 u16 md2; /* offset 0x44 */ 80 u16 dummy5; 81 u16 trs2; /* offset 0x48 */ 82 u16 rsv3[11]; 83 u16 mbtif2; /* offset 0x60 */ 84 u16 dummy6; 85 u16 mbrif2; /* offset 0x64 */ 86 u16 dummy7; 87 u16 mbim2; /* offset 0x68 */ 88 u16 rsv4[11]; 89 u16 clk; /* offset 0x80 */ 90 u16 dummy8; 91 u16 timing; /* offset 0x84 */ 92 u16 rsv5[3]; 93 u16 status; /* offset 0x8c */ 94 u16 dummy9; 95 u16 cec; /* offset 0x90 */ 96 u16 dummy10; 97 u16 gis; /* offset 0x94 */ 98 u16 
dummy11; 99 u16 gim; /* offset 0x98 */ 100 u16 rsv6[3]; 101 u16 ctrl; /* offset 0xa0 */ 102 u16 dummy12; 103 u16 intr; /* offset 0xa4 */ 104 u16 rsv7[7]; 105 u16 esr; /* offset 0xb4 */ 106 u16 rsv8[37]; 107 108 /* 109 * channel(mailbox) mask and message registers 110 */ 111 struct bfin_can_mask_regs msk[MAX_CHL_NUMBER]; /* offset 0x100 */ 112 struct bfin_can_channel_regs chl[MAX_CHL_NUMBER]; /* offset 0x200 */ 113}; 114 115/* 116 * bfin can private data 117 */ 118struct bfin_can_priv { 119 struct can_priv can; /* must be the first member */ 120 struct net_device *dev; 121 void __iomem *membase; 122 int rx_irq; 123 int tx_irq; 124 int err_irq; 125 unsigned short *pin_list; 126}; 127 128/* 129 * bfin can timing parameters 130 */ 131static struct can_bittiming_const bfin_can_bittiming_const = { 132 .name = DRV_NAME, 133 .tseg1_min = 1, 134 .tseg1_max = 16, 135 .tseg2_min = 1, 136 .tseg2_max = 8, 137 .sjw_max = 4, 138 /* 139 * Although the BRP field can be set to any value, it is recommended 140 * that the value be greater than or equal to 4, as restrictions 141 * apply to the bit timing configuration when BRP is less than 4. 142 */ 143 .brp_min = 4, 144 .brp_max = 1024, 145 .brp_inc = 1, 146}; 147 148static int bfin_can_set_bittiming(struct net_device *dev) 149{ 150 struct bfin_can_priv *priv = netdev_priv(dev); 151 struct bfin_can_regs __iomem *reg = priv->membase; 152 struct can_bittiming *bt = &priv->can.bittiming; 153 u16 clk, timing; 154 155 clk = bt->brp - 1; 156 timing = ((bt->sjw - 1) << 8) | (bt->prop_seg + bt->phase_seg1 - 1) | 157 ((bt->phase_seg2 - 1) << 4); 158 159 /* 160 * If the SAM bit is set, the input signal is oversampled three times 161 * at the SCLK rate. 
162 */ 163 if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) 164 timing |= SAM; 165 166 bfin_write16(®->clk, clk); 167 bfin_write16(®->timing, timing); 168 169 dev_info(dev->dev.parent, "setting CLOCK=0x%04x TIMING=0x%04x\n", 170 clk, timing); 171 172 return 0; 173} 174 175static void bfin_can_set_reset_mode(struct net_device *dev) 176{ 177 struct bfin_can_priv *priv = netdev_priv(dev); 178 struct bfin_can_regs __iomem *reg = priv->membase; 179 int timeout = BFIN_CAN_TIMEOUT; 180 int i; 181 182 /* disable interrupts */ 183 bfin_write16(®->mbim1, 0); 184 bfin_write16(®->mbim2, 0); 185 bfin_write16(®->gim, 0); 186 187 /* reset can and enter configuration mode */ 188 bfin_write16(®->ctrl, SRS | CCR); 189 SSYNC(); 190 bfin_write16(®->ctrl, CCR); 191 SSYNC(); 192 while (!(bfin_read16(®->ctrl) & CCA)) { 193 udelay(10); 194 if (--timeout == 0) { 195 dev_err(dev->dev.parent, 196 "fail to enter configuration mode\n"); 197 BUG(); 198 } 199 } 200 201 /* 202 * All mailbox configurations are marked as inactive 203 * by writing to CAN Mailbox Configuration Registers 1 and 2 204 * For all bits: 0 - Mailbox disabled, 1 - Mailbox enabled 205 */ 206 bfin_write16(®->mc1, 0); 207 bfin_write16(®->mc2, 0); 208 209 /* Set Mailbox Direction */ 210 bfin_write16(®->md1, 0xFFFF); /* mailbox 1-16 are RX */ 211 bfin_write16(®->md2, 0); /* mailbox 17-32 are TX */ 212 213 /* RECEIVE_STD_CHL */ 214 for (i = 0; i < 2; i++) { 215 bfin_write16(®->chl[RECEIVE_STD_CHL + i].id0, 0); 216 bfin_write16(®->chl[RECEIVE_STD_CHL + i].id1, AME); 217 bfin_write16(®->chl[RECEIVE_STD_CHL + i].dlc, 0); 218 bfin_write16(®->msk[RECEIVE_STD_CHL + i].amh, 0x1FFF); 219 bfin_write16(®->msk[RECEIVE_STD_CHL + i].aml, 0xFFFF); 220 } 221 222 /* RECEIVE_EXT_CHL */ 223 for (i = 0; i < 2; i++) { 224 bfin_write16(®->chl[RECEIVE_EXT_CHL + i].id0, 0); 225 bfin_write16(®->chl[RECEIVE_EXT_CHL + i].id1, AME | IDE); 226 bfin_write16(®->chl[RECEIVE_EXT_CHL + i].dlc, 0); 227 bfin_write16(®->msk[RECEIVE_EXT_CHL + i].amh, 0x1FFF); 228 
bfin_write16(®->msk[RECEIVE_EXT_CHL + i].aml, 0xFFFF); 229 } 230 231 bfin_write16(®->mc2, BIT(TRANSMIT_CHL - 16)); 232 bfin_write16(®->mc1, BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL)); 233 SSYNC(); 234 235 priv->can.state = CAN_STATE_STOPPED; 236} 237 238static void bfin_can_set_normal_mode(struct net_device *dev) 239{ 240 struct bfin_can_priv *priv = netdev_priv(dev); 241 struct bfin_can_regs __iomem *reg = priv->membase; 242 int timeout = BFIN_CAN_TIMEOUT; 243 244 /* 245 * leave configuration mode 246 */ 247 bfin_write16(®->ctrl, bfin_read16(®->ctrl) & ~CCR); 248 249 while (bfin_read16(®->status) & CCA) { 250 udelay(10); 251 if (--timeout == 0) { 252 dev_err(dev->dev.parent, 253 "fail to leave configuration mode\n"); 254 BUG(); 255 } 256 } 257 258 /* 259 * clear _All_ tx and rx interrupts 260 */ 261 bfin_write16(®->mbtif1, 0xFFFF); 262 bfin_write16(®->mbtif2, 0xFFFF); 263 bfin_write16(®->mbrif1, 0xFFFF); 264 bfin_write16(®->mbrif2, 0xFFFF); 265 266 /* 267 * clear global interrupt status register 268 */ 269 bfin_write16(®->gis, 0x7FF); /* overwrites with '1' */ 270 271 /* 272 * Initialize Interrupts 273 * - set bits in the mailbox interrupt mask register 274 * - global interrupt mask 275 */ 276 bfin_write16(®->mbim1, BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL)); 277 bfin_write16(®->mbim2, BIT(TRANSMIT_CHL - 16)); 278 279 bfin_write16(®->gim, EPIM | BOIM | RMLIM); 280 SSYNC(); 281} 282 283static void bfin_can_start(struct net_device *dev) 284{ 285 struct bfin_can_priv *priv = netdev_priv(dev); 286 287 /* enter reset mode */ 288 if (priv->can.state != CAN_STATE_STOPPED) 289 bfin_can_set_reset_mode(dev); 290 291 /* leave reset mode */ 292 bfin_can_set_normal_mode(dev); 293} 294 295static int bfin_can_set_mode(struct net_device *dev, enum can_mode mode) 296{ 297 switch (mode) { 298 case CAN_MODE_START: 299 bfin_can_start(dev); 300 if (netif_queue_stopped(dev)) 301 netif_wake_queue(dev); 302 break; 303 304 default: 305 return -EOPNOTSUPP; 306 } 307 308 return 0; 309} 
310 311static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev) 312{ 313 struct bfin_can_priv *priv = netdev_priv(dev); 314 struct bfin_can_regs __iomem *reg = priv->membase; 315 struct can_frame *cf = (struct can_frame *)skb->data; 316 u8 dlc = cf->can_dlc; 317 canid_t id = cf->can_id; 318 u8 *data = cf->data; 319 u16 val; 320 int i; 321 322 if (can_dropped_invalid_skb(dev, skb)) 323 return NETDEV_TX_OK; 324 325 netif_stop_queue(dev); 326 327 /* fill id */ 328 if (id & CAN_EFF_FLAG) { 329 bfin_write16(®->chl[TRANSMIT_CHL].id0, id); 330 if (id & CAN_RTR_FLAG) 331 writew(((id & 0x1FFF0000) >> 16) | IDE | AME | RTR, 332 ®->chl[TRANSMIT_CHL].id1); 333 else 334 writew(((id & 0x1FFF0000) >> 16) | IDE | AME, 335 ®->chl[TRANSMIT_CHL].id1); 336 337 } else { 338 if (id & CAN_RTR_FLAG) 339 writew((id << 2) | AME | RTR, 340 ®->chl[TRANSMIT_CHL].id1); 341 else 342 bfin_write16(®->chl[TRANSMIT_CHL].id1, 343 (id << 2) | AME); 344 } 345 346 /* fill payload */ 347 for (i = 0; i < 8; i += 2) { 348 val = ((7 - i) < dlc ? (data[7 - i]) : 0) + 349 ((6 - i) < dlc ? 
(data[6 - i] << 8) : 0); 350 bfin_write16(®->chl[TRANSMIT_CHL].data[i], val); 351 } 352 353 /* fill data length code */ 354 bfin_write16(®->chl[TRANSMIT_CHL].dlc, dlc); 355 356 dev->trans_start = jiffies; 357 358 can_put_echo_skb(skb, dev, 0); 359 360 /* set transmit request */ 361 bfin_write16(®->trs2, BIT(TRANSMIT_CHL - 16)); 362 363 return 0; 364} 365 366static void bfin_can_rx(struct net_device *dev, u16 isrc) 367{ 368 struct bfin_can_priv *priv = netdev_priv(dev); 369 struct net_device_stats *stats = &dev->stats; 370 struct bfin_can_regs __iomem *reg = priv->membase; 371 struct can_frame *cf; 372 struct sk_buff *skb; 373 int obj; 374 int i; 375 u16 val; 376 377 skb = alloc_can_skb(dev, &cf); 378 if (skb == NULL) 379 return; 380 381 /* get id */ 382 if (isrc & BIT(RECEIVE_EXT_CHL)) { 383 /* extended frame format (EFF) */ 384 cf->can_id = ((bfin_read16(®->chl[RECEIVE_EXT_CHL].id1) 385 & 0x1FFF) << 16) 386 + bfin_read16(®->chl[RECEIVE_EXT_CHL].id0); 387 cf->can_id |= CAN_EFF_FLAG; 388 obj = RECEIVE_EXT_CHL; 389 } else { 390 /* standard frame format (SFF) */ 391 cf->can_id = (bfin_read16(®->chl[RECEIVE_STD_CHL].id1) 392 & 0x1ffc) >> 2; 393 obj = RECEIVE_STD_CHL; 394 } 395 if (bfin_read16(®->chl[obj].id1) & RTR) 396 cf->can_id |= CAN_RTR_FLAG; 397 398 /* get data length code */ 399 cf->can_dlc = get_can_dlc(bfin_read16(®->chl[obj].dlc) & 0xF); 400 401 /* get payload */ 402 for (i = 0; i < 8; i += 2) { 403 val = bfin_read16(®->chl[obj].data[i]); 404 cf->data[7 - i] = (7 - i) < cf->can_dlc ? val : 0; 405 cf->data[6 - i] = (6 - i) < cf->can_dlc ? 
(val >> 8) : 0; 406 } 407 408 netif_rx(skb); 409 410 stats->rx_packets++; 411 stats->rx_bytes += cf->can_dlc; 412} 413 414static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status) 415{ 416 struct bfin_can_priv *priv = netdev_priv(dev); 417 struct bfin_can_regs __iomem *reg = priv->membase; 418 struct net_device_stats *stats = &dev->stats; 419 struct can_frame *cf; 420 struct sk_buff *skb; 421 enum can_state state = priv->can.state; 422 423 skb = alloc_can_err_skb(dev, &cf); 424 if (skb == NULL) 425 return -ENOMEM; 426 427 if (isrc & RMLIS) { 428 /* data overrun interrupt */ 429 dev_dbg(dev->dev.parent, "data overrun interrupt\n"); 430 cf->can_id |= CAN_ERR_CRTL; 431 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; 432 stats->rx_over_errors++; 433 stats->rx_errors++; 434 } 435 436 if (isrc & BOIS) { 437 dev_dbg(dev->dev.parent, "bus-off mode interrupt\n"); 438 state = CAN_STATE_BUS_OFF; 439 cf->can_id |= CAN_ERR_BUSOFF; 440 can_bus_off(dev); 441 } 442 443 if (isrc & EPIS) { 444 /* error passive interrupt */ 445 dev_dbg(dev->dev.parent, "error passive interrupt\n"); 446 state = CAN_STATE_ERROR_PASSIVE; 447 } 448 449 if ((isrc & EWTIS) || (isrc & EWRIS)) { 450 dev_dbg(dev->dev.parent, 451 "Error Warning Transmit/Receive Interrupt\n"); 452 state = CAN_STATE_ERROR_WARNING; 453 } 454 455 if (state != priv->can.state && (state == CAN_STATE_ERROR_WARNING || 456 state == CAN_STATE_ERROR_PASSIVE)) { 457 u16 cec = bfin_read16(®->cec); 458 u8 rxerr = cec; 459 u8 txerr = cec >> 8; 460 461 cf->can_id |= CAN_ERR_CRTL; 462 if (state == CAN_STATE_ERROR_WARNING) { 463 priv->can.can_stats.error_warning++; 464 cf->data[1] = (txerr > rxerr) ? 465 CAN_ERR_CRTL_TX_WARNING : 466 CAN_ERR_CRTL_RX_WARNING; 467 } else { 468 priv->can.can_stats.error_passive++; 469 cf->data[1] = (txerr > rxerr) ? 
470 CAN_ERR_CRTL_TX_PASSIVE : 471 CAN_ERR_CRTL_RX_PASSIVE; 472 } 473 } 474 475 if (status) { 476 priv->can.can_stats.bus_error++; 477 478 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; 479 480 if (status & BEF) 481 cf->data[2] |= CAN_ERR_PROT_BIT; 482 else if (status & FER) 483 cf->data[2] |= CAN_ERR_PROT_FORM; 484 else if (status & SER) 485 cf->data[2] |= CAN_ERR_PROT_STUFF; 486 else 487 cf->data[2] |= CAN_ERR_PROT_UNSPEC; 488 } 489 490 priv->can.state = state; 491 492 netif_rx(skb); 493 494 stats->rx_packets++; 495 stats->rx_bytes += cf->can_dlc; 496 497 return 0; 498} 499 500irqreturn_t bfin_can_interrupt(int irq, void *dev_id) 501{ 502 struct net_device *dev = dev_id; 503 struct bfin_can_priv *priv = netdev_priv(dev); 504 struct bfin_can_regs __iomem *reg = priv->membase; 505 struct net_device_stats *stats = &dev->stats; 506 u16 status, isrc; 507 508 if ((irq == priv->tx_irq) && bfin_read16(®->mbtif2)) { 509 /* transmission complete interrupt */ 510 bfin_write16(®->mbtif2, 0xFFFF); 511 stats->tx_packets++; 512 stats->tx_bytes += bfin_read16(®->chl[TRANSMIT_CHL].dlc); 513 can_get_echo_skb(dev, 0); 514 netif_wake_queue(dev); 515 } else if ((irq == priv->rx_irq) && bfin_read16(®->mbrif1)) { 516 /* receive interrupt */ 517 isrc = bfin_read16(®->mbrif1); 518 bfin_write16(®->mbrif1, 0xFFFF); 519 bfin_can_rx(dev, isrc); 520 } else if ((irq == priv->err_irq) && bfin_read16(®->gis)) { 521 /* error interrupt */ 522 isrc = bfin_read16(®->gis); 523 status = bfin_read16(®->esr); 524 bfin_write16(®->gis, 0x7FF); 525 bfin_can_err(dev, isrc, status); 526 } else { 527 return IRQ_NONE; 528 } 529 530 return IRQ_HANDLED; 531} 532 533static int bfin_can_open(struct net_device *dev) 534{ 535 struct bfin_can_priv *priv = netdev_priv(dev); 536 int err; 537 538 /* set chip into reset mode */ 539 bfin_can_set_reset_mode(dev); 540 541 /* common open */ 542 err = open_candev(dev); 543 if (err) 544 goto exit_open; 545 546 /* register interrupt handler */ 547 err = request_irq(priv->rx_irq, 
&bfin_can_interrupt, 0, 548 "bfin-can-rx", dev); 549 if (err) 550 goto exit_rx_irq; 551 err = request_irq(priv->tx_irq, &bfin_can_interrupt, 0, 552 "bfin-can-tx", dev); 553 if (err) 554 goto exit_tx_irq; 555 err = request_irq(priv->err_irq, &bfin_can_interrupt, 0, 556 "bfin-can-err", dev); 557 if (err) 558 goto exit_err_irq; 559 560 bfin_can_start(dev); 561 562 netif_start_queue(dev); 563 564 return 0; 565 566exit_err_irq: 567 free_irq(priv->tx_irq, dev); 568exit_tx_irq: 569 free_irq(priv->rx_irq, dev); 570exit_rx_irq: 571 close_candev(dev); 572exit_open: 573 return err; 574} 575 576static int bfin_can_close(struct net_device *dev) 577{ 578 struct bfin_can_priv *priv = netdev_priv(dev); 579 580 netif_stop_queue(dev); 581 bfin_can_set_reset_mode(dev); 582 583 close_candev(dev); 584 585 free_irq(priv->rx_irq, dev); 586 free_irq(priv->tx_irq, dev); 587 free_irq(priv->err_irq, dev); 588 589 return 0; 590} 591 592struct net_device *alloc_bfin_candev(void) 593{ 594 struct net_device *dev; 595 struct bfin_can_priv *priv; 596 597 dev = alloc_candev(sizeof(*priv), TX_ECHO_SKB_MAX); 598 if (!dev) 599 return NULL; 600 601 priv = netdev_priv(dev); 602 603 priv->dev = dev; 604 priv->can.bittiming_const = &bfin_can_bittiming_const; 605 priv->can.do_set_bittiming = bfin_can_set_bittiming; 606 priv->can.do_set_mode = bfin_can_set_mode; 607 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; 608 609 return dev; 610} 611 612static const struct net_device_ops bfin_can_netdev_ops = { 613 .ndo_open = bfin_can_open, 614 .ndo_stop = bfin_can_close, 615 .ndo_start_xmit = bfin_can_start_xmit, 616}; 617 618static int __devinit bfin_can_probe(struct platform_device *pdev) 619{ 620 int err; 621 struct net_device *dev; 622 struct bfin_can_priv *priv; 623 struct resource *res_mem, *rx_irq, *tx_irq, *err_irq; 624 unsigned short *pdata; 625 626 pdata = pdev->dev.platform_data; 627 if (!pdata) { 628 dev_err(&pdev->dev, "No platform data provided!\n"); 629 err = -EINVAL; 630 goto exit; 631 } 632 
633 res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 634 rx_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 635 tx_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 1); 636 err_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 2); 637 if (!res_mem || !rx_irq || !tx_irq || !err_irq) { 638 err = -EINVAL; 639 goto exit; 640 } 641 642 if (!request_mem_region(res_mem->start, resource_size(res_mem), 643 dev_name(&pdev->dev))) { 644 err = -EBUSY; 645 goto exit; 646 } 647 648 /* request peripheral pins */ 649 err = peripheral_request_list(pdata, dev_name(&pdev->dev)); 650 if (err) 651 goto exit_mem_release; 652 653 dev = alloc_bfin_candev(); 654 if (!dev) { 655 err = -ENOMEM; 656 goto exit_peri_pin_free; 657 } 658 659 priv = netdev_priv(dev); 660 priv->membase = (void __iomem *)res_mem->start; 661 priv->rx_irq = rx_irq->start; 662 priv->tx_irq = tx_irq->start; 663 priv->err_irq = err_irq->start; 664 priv->pin_list = pdata; 665 priv->can.clock.freq = get_sclk(); 666 667 dev_set_drvdata(&pdev->dev, dev); 668 SET_NETDEV_DEV(dev, &pdev->dev); 669 670 dev->flags |= IFF_ECHO; /* we support local echo */ 671 dev->netdev_ops = &bfin_can_netdev_ops; 672 673 bfin_can_set_reset_mode(dev); 674 675 err = register_candev(dev); 676 if (err) { 677 dev_err(&pdev->dev, "registering failed (err=%d)\n", err); 678 goto exit_candev_free; 679 } 680 681 dev_info(&pdev->dev, 682 "%s device registered" 683 "(®_base=%p, rx_irq=%d, tx_irq=%d, err_irq=%d, sclk=%d)\n", 684 DRV_NAME, (void *)priv->membase, priv->rx_irq, 685 priv->tx_irq, priv->err_irq, priv->can.clock.freq); 686 return 0; 687 688exit_candev_free: 689 free_candev(dev); 690exit_peri_pin_free: 691 peripheral_free_list(pdata); 692exit_mem_release: 693 release_mem_region(res_mem->start, resource_size(res_mem)); 694exit: 695 return err; 696} 697 698static int __devexit bfin_can_remove(struct platform_device *pdev) 699{ 700 struct net_device *dev = dev_get_drvdata(&pdev->dev); 701 struct bfin_can_priv *priv = netdev_priv(dev); 
702 struct resource *res; 703 704 bfin_can_set_reset_mode(dev); 705 706 unregister_candev(dev); 707 708 dev_set_drvdata(&pdev->dev, NULL); 709 710 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 711 release_mem_region(res->start, resource_size(res)); 712 713 peripheral_free_list(priv->pin_list); 714 715 free_candev(dev); 716 return 0; 717} 718 719#ifdef CONFIG_PM 720static int bfin_can_suspend(struct platform_device *pdev, pm_message_t mesg) 721{ 722 struct net_device *dev = dev_get_drvdata(&pdev->dev); 723 struct bfin_can_priv *priv = netdev_priv(dev); 724 struct bfin_can_regs __iomem *reg = priv->membase; 725 int timeout = BFIN_CAN_TIMEOUT; 726 727 if (netif_running(dev)) { 728 /* enter sleep mode */ 729 bfin_write16(®->ctrl, bfin_read16(®->ctrl) | SMR); 730 SSYNC(); 731 while (!(bfin_read16(®->intr) & SMACK)) { 732 udelay(10); 733 if (--timeout == 0) { 734 dev_err(dev->dev.parent, 735 "fail to enter sleep mode\n"); 736 BUG(); 737 } 738 } 739 } 740 741 return 0; 742} 743 744static int bfin_can_resume(struct platform_device *pdev) 745{ 746 struct net_device *dev = dev_get_drvdata(&pdev->dev); 747 struct bfin_can_priv *priv = netdev_priv(dev); 748 struct bfin_can_regs __iomem *reg = priv->membase; 749 750 if (netif_running(dev)) { 751 /* leave sleep mode */ 752 bfin_write16(®->intr, 0); 753 SSYNC(); 754 } 755 756 return 0; 757} 758#else 759#define bfin_can_suspend NULL 760#define bfin_can_resume NULL 761#endif /* CONFIG_PM */ 762 763static struct platform_driver bfin_can_driver = { 764 .probe = bfin_can_probe, 765 .remove = __devexit_p(bfin_can_remove), 766 .suspend = bfin_can_suspend, 767 .resume = bfin_can_resume, 768 .driver = { 769 .name = DRV_NAME, 770 .owner = THIS_MODULE, 771 }, 772}; 773 774static int __init bfin_can_init(void) 775{ 776 return platform_driver_register(&bfin_can_driver); 777} 778module_init(bfin_can_init); 779 780static void __exit bfin_can_exit(void) 781{ 782 platform_driver_unregister(&bfin_can_driver); 783} 
784module_exit(bfin_can_exit); 785 786MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>"); 787MODULE_LICENSE("GPL"); 788MODULE_DESCRIPTION("Blackfin on-chip CAN netdevice driver"); 789