pxaficp_ir.c revision 6ed106549d17474ca17a16057f4c0ed4eba5a7ca
/*
 * linux/drivers/net/irda/pxaficp_ir.c
 *
 * Based on sa1100_ir.c by Russell King
 *
 * Changes copyright (C) 2003-2005 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Infra-red driver (SIR/FIR) for the PXA2xx embedded microprocessor
 *
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/clk.h>

#include <net/irda/irda.h>
#include <net/irda/irmod.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>

#include <mach/dma.h>
#include <mach/irda.h>
#include <mach/regs-uart.h>
#include <mach/regs-ost.h>

/* FICP (Fast Infrared Communications Port) register block. */
#define FICP		__REG(0x40800000)  /* Start of FICP area */
#define ICCR0		__REG(0x40800000)  /* ICP Control Register 0 */
#define ICCR1		__REG(0x40800004)  /* ICP Control Register 1 */
#define ICCR2		__REG(0x40800008)  /* ICP Control Register 2 */
#define ICDR		__REG(0x4080000c)  /* ICP Data Register */
#define ICSR0		__REG(0x40800014)  /* ICP Status Register 0 */
#define ICSR1		__REG(0x40800018)  /* ICP Status Register 1 */

#define ICCR0_AME	(1 << 7)	/* Address match enable */
#define ICCR0_TIE	(1 << 6)	/* Transmit FIFO interrupt enable */
#define ICCR0_RIE	(1 << 5)	/* Receive FIFO interrupt enable */
#define ICCR0_RXE	(1 << 4)	/* Receive enable */
#define ICCR0_TXE	(1 << 3)	/* Transmit enable */
#define ICCR0_TUS	(1 << 2)	/* Transmit FIFO underrun select */
#define ICCR0_LBM	(1 << 1)	/* Loopback mode */
#define ICCR0_ITR	(1 << 0)	/* IrDA transmission */

#define ICCR2_RXP	(1 << 3)	/* Receive Pin Polarity select */
#define ICCR2_TXP	(1 << 2)	/* Transmit Pin Polarity select */
#define ICCR2_TRIG	(3 << 0)	/* Receive FIFO Trigger threshold */
#define ICCR2_TRIG_8	(0 << 0)	/*	>= 8 bytes */
#define ICCR2_TRIG_16	(1 << 0)	/*	>= 16 bytes */
#define ICCR2_TRIG_32	(2 << 0)	/*	>= 32 bytes */

#ifdef CONFIG_PXA27x
#define ICSR0_EOC	(1 << 6)	/* DMA End of Descriptor Chain */
#endif
#define ICSR0_FRE	(1 << 5)	/* Framing error */
#define ICSR0_RFS	(1 << 4)	/* Receive FIFO service request */
#define ICSR0_TFS	(1 << 3)	/* Transmit FIFO service request */
#define ICSR0_RAB	(1 << 2)	/* Receiver abort */
#define ICSR0_TUR	(1 << 1)	/* Transmit FIFO underrun */
#define ICSR0_EIF	(1 << 0)	/* End/Error in FIFO */

/* NOTE(review): the FIR RX path counts ICSR1_ROR as rx_over_errors, so
 * this is the receive FIFO *overrun* bit (the old comment said underrun). */
#define ICSR1_ROR	(1 << 6)	/* Receive FIFO overrun */
#define ICSR1_CRE	(1 << 5)	/* CRC error */
#define ICSR1_EOF	(1 << 4)	/* End of frame */
#define ICSR1_TNF	(1 << 3)	/* Transmit FIFO not full */
#define ICSR1_RNE	(1 << 2)	/* Receive FIFO not empty */
#define ICSR1_TBY	(1 << 1)	/* Transmitter busy flag */
#define ICSR1_RSY	(1 << 0)	/* Receiver synchronized flag */

/* STUART Infrared Selection Register (STISR) bit values. */
#define IrSR_RXPL_NEG_IS_ZERO	(1<<4)
#define IrSR_RXPL_POS_IS_ZERO	0x0
#define IrSR_TXPL_NEG_IS_ZERO	(1<<3)
#define IrSR_TXPL_POS_IS_ZERO	0x0
#define IrSR_XMODE_PULSE_1_6	(1<<2)
#define IrSR_XMODE_PULSE_3_16	0x0
#define IrSR_RCVEIR_IR_MODE	(1<<1)
#define IrSR_RCVEIR_UART_MODE	0x0
#define IrSR_XMITIR_IR_MODE	(1<<0)
#define IrSR_XMITIR_UART_MODE	0x0

/* STISR value: IR receiver on, transmitter routed through the UART. */
#define IrSR_IR_RECEIVE_ON (\
		IrSR_RXPL_NEG_IS_ZERO | \
		IrSR_TXPL_POS_IS_ZERO | \
		IrSR_XMODE_PULSE_3_16 | \
		IrSR_RCVEIR_IR_MODE   | \
		IrSR_XMITIR_UART_MODE)

/* STISR value: IR transmitter on, receiver routed through the UART. */
#define IrSR_IR_TRANSMIT_ON (\
		IrSR_RXPL_NEG_IS_ZERO | \
		IrSR_TXPL_POS_IS_ZERO | \
		IrSR_XMODE_PULSE_3_16 | \
		IrSR_RCVEIR_UART_MODE | \
		IrSR_XMITIR_IR_MODE)

/* Per-device driver state. */
struct pxa_irda {
	int			speed;		/* current link speed (bps) */
	int			newspeed;	/* pending speed change, 0 if none */
	unsigned long		last_oscr;	/* OS timer snapshot of last TX/RX activity */

	unsigned char		*dma_rx_buff;	/* FIR receive DMA buffer (coherent) */
	unsigned char		*dma_tx_buff;	/* FIR transmit DMA buffer (coherent) */
	dma_addr_t		dma_rx_buff_phy;
	dma_addr_t		dma_tx_buff_phy;
	unsigned int		dma_tx_buff_len;
	int			txdma;		/* DMA channel numbers */
	int			rxdma;

	struct irlap_cb		*irlap;
	struct qos_info		qos;

	iobuff_t		tx_buff;	/* SIR wrap/unwrap buffers */
	iobuff_t		rx_buff;

	struct device		*dev;
	struct pxaficp_platform_data *pdata;
	struct clk		*fir_clk;
	struct clk		*sir_clk;
	struct clk		*cur_clk;	/* whichever of the two is enabled, or NULL */
};

/* Disable whichever functional clock is currently enabled (if any). */
static inline void pxa_irda_disable_clk(struct pxa_irda *si)
{
	if (si->cur_clk)
		clk_disable(si->cur_clk);
	si->cur_clk = NULL;
}

/* Enable the FICP clock for FIR operation and record it as current. */
static inline void pxa_irda_enable_firclk(struct pxa_irda *si)
{
	si->cur_clk = si->fir_clk;
	clk_enable(si->fir_clk);
}

/* Enable the STUART clock for SIR operation and record it as current. */
static inline void pxa_irda_enable_sirclk(struct pxa_irda *si)
{
	si->cur_clk = si->sir_clk;
	clk_enable(si->sir_clk);
}


#define IS_FIR(si)		((si)->speed >= 4000000)
#define IRDA_FRAME_SIZE_LIMIT	2047

/* Arm the FIR receive DMA channel: ICDR -> dma_rx_buff, max-size frame. */
inline static void pxa_irda_fir_dma_rx_start(struct pxa_irda *si)
{
	DCSR(si->rxdma)  = DCSR_NODESC;
	DSADR(si->rxdma) = __PREG(ICDR);
	DTADR(si->rxdma) = si->dma_rx_buff_phy;
	DCMD(si->rxdma) = DCMD_INCTRGADDR | DCMD_FLOWSRC |  DCMD_WIDTH1 | DCMD_BURST32 | IRDA_FRAME_SIZE_LIMIT;
	DCSR(si->rxdma) |= DCSR_RUN;
}

/* Arm the FIR transmit DMA channel: dma_tx_buff -> ICDR, IRQ on completion. */
inline static void pxa_irda_fir_dma_tx_start(struct pxa_irda *si)
{
	DCSR(si->txdma)  = DCSR_NODESC;
	DSADR(si->txdma) = si->dma_tx_buff_phy;
	DTADR(si->txdma) = __PREG(ICDR);
	DCMD(si->txdma) = DCMD_INCSRCADDR | DCMD_FLOWTRG |  DCMD_ENDIRQEN | DCMD_WIDTH1 | DCMD_BURST32 | si->dma_tx_buff_len;
	DCSR(si->txdma) |= DCSR_RUN;
}
/*
 * Set the IrDA communications speed.
 *
 * For SIR rates the STUART is (re)programmed; if the port was in FIR
 * mode, RX DMA and the FICP are shut down first and the board
 * transceiver is switched back to SIR.  For 4 Mbps the STUART is shut
 * down, the FICP clock is enabled and RX DMA is armed.  The whole
 * switch runs with local IRQs disabled so the handlers never see a
 * half-reconfigured port.
 *
 * Returns 0 on success, -EINVAL for an unsupported speed.
 */
static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
{
	unsigned long flags;
	unsigned int divisor;

	switch (speed) {
	case 9600:	case 19200:	case 38400:
	case 57600:	case 115200:

		/* refer to PXA250/210 Developer's Manual 10-7 */
		/*  BaudRate = 14.7456 MHz / (16*Divisor) */
		divisor = 14745600 / (16 * speed);

		local_irq_save(flags);

		if (IS_FIR(si)) {
			/* stop RX DMA */
			DCSR(si->rxdma) &= ~DCSR_RUN;
			/* disable FICP */
			ICCR0 = 0;
			pxa_irda_disable_clk(si);

			/* set board transceiver to SIR mode */
			si->pdata->transceiver_mode(si->dev, IR_SIRMODE);

			/* enable the STUART clock */
			pxa_irda_enable_sirclk(si);
		}

		/* disable STUART first */
		STIER = 0;

		/* access DLL & DLH */
		STLCR |= LCR_DLAB;
		STDLL = divisor & 0xff;
		STDLH = divisor >> 8;
		STLCR &= ~LCR_DLAB;

		si->speed = speed;
		STISR = IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6;
		STIER = IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE;

		local_irq_restore(flags);
		break;

	case 4000000:
		local_irq_save(flags);

		/* disable STUART */
		STIER = 0;
		STISR = 0;
		pxa_irda_disable_clk(si);

		/* disable FICP first */
		ICCR0 = 0;

		/* set board transceiver to FIR mode */
		si->pdata->transceiver_mode(si->dev, IR_FIRMODE);

		/* enable the FICP clock */
		pxa_irda_enable_firclk(si);

		si->speed = speed;
		pxa_irda_fir_dma_rx_start(si);
		ICCR0 = ICCR0_ITR | ICCR0_RXE;

		local_irq_restore(flags);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
*/ 245static irqreturn_t pxa_irda_sir_irq(int irq, void *dev_id) 246{ 247 struct net_device *dev = dev_id; 248 struct pxa_irda *si = netdev_priv(dev); 249 int iir, lsr, data; 250 251 iir = STIIR; 252 253 switch (iir & 0x0F) { 254 case 0x06: /* Receiver Line Status */ 255 lsr = STLSR; 256 while (lsr & LSR_FIFOE) { 257 data = STRBR; 258 if (lsr & (LSR_OE | LSR_PE | LSR_FE | LSR_BI)) { 259 printk(KERN_DEBUG "pxa_ir: sir receiving error\n"); 260 dev->stats.rx_errors++; 261 if (lsr & LSR_FE) 262 dev->stats.rx_frame_errors++; 263 if (lsr & LSR_OE) 264 dev->stats.rx_fifo_errors++; 265 } else { 266 dev->stats.rx_bytes++; 267 async_unwrap_char(dev, &dev->stats, 268 &si->rx_buff, data); 269 } 270 lsr = STLSR; 271 } 272 si->last_oscr = OSCR; 273 break; 274 275 case 0x04: /* Received Data Available */ 276 /* forth through */ 277 278 case 0x0C: /* Character Timeout Indication */ 279 do { 280 dev->stats.rx_bytes++; 281 async_unwrap_char(dev, &dev->stats, &si->rx_buff, STRBR); 282 } while (STLSR & LSR_DR); 283 si->last_oscr = OSCR; 284 break; 285 286 case 0x02: /* Transmit FIFO Data Request */ 287 while ((si->tx_buff.len) && (STLSR & LSR_TDRQ)) { 288 STTHR = *si->tx_buff.data++; 289 si->tx_buff.len -= 1; 290 } 291 292 if (si->tx_buff.len == 0) { 293 dev->stats.tx_packets++; 294 dev->stats.tx_bytes += si->tx_buff.data - si->tx_buff.head; 295 296 /* We need to ensure that the transmitter has finished. */ 297 while ((STLSR & LSR_TEMT) == 0) 298 cpu_relax(); 299 si->last_oscr = OSCR; 300 301 /* 302 * Ok, we've finished transmitting. Now enable 303 * the receiver. Sometimes we get a receive IRQ 304 * immediately after a transmit... 305 */ 306 if (si->newspeed) { 307 pxa_irda_set_speed(si, si->newspeed); 308 si->newspeed = 0; 309 } else { 310 /* enable IR Receiver, disable IR Transmitter */ 311 STISR = IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6; 312 /* enable STUART and receive interrupts */ 313 STIER = IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE; 314 } 315 /* I'm hungry! 
*/ 316 netif_wake_queue(dev); 317 } 318 break; 319 } 320 321 return IRQ_HANDLED; 322} 323 324/* FIR Receive DMA interrupt handler */ 325static void pxa_irda_fir_dma_rx_irq(int channel, void *data) 326{ 327 int dcsr = DCSR(channel); 328 329 DCSR(channel) = dcsr & ~DCSR_RUN; 330 331 printk(KERN_DEBUG "pxa_ir: fir rx dma bus error %#x\n", dcsr); 332} 333 334/* FIR Transmit DMA interrupt handler */ 335static void pxa_irda_fir_dma_tx_irq(int channel, void *data) 336{ 337 struct net_device *dev = data; 338 struct pxa_irda *si = netdev_priv(dev); 339 int dcsr; 340 341 dcsr = DCSR(channel); 342 DCSR(channel) = dcsr & ~DCSR_RUN; 343 344 if (dcsr & DCSR_ENDINTR) { 345 dev->stats.tx_packets++; 346 dev->stats.tx_bytes += si->dma_tx_buff_len; 347 } else { 348 dev->stats.tx_errors++; 349 } 350 351 while (ICSR1 & ICSR1_TBY) 352 cpu_relax(); 353 si->last_oscr = OSCR; 354 355 /* 356 * HACK: It looks like the TBY bit is dropped too soon. 357 * Without this delay things break. 358 */ 359 udelay(120); 360 361 if (si->newspeed) { 362 pxa_irda_set_speed(si, si->newspeed); 363 si->newspeed = 0; 364 } else { 365 int i = 64; 366 367 ICCR0 = 0; 368 pxa_irda_fir_dma_rx_start(si); 369 while ((ICSR1 & ICSR1_RNE) && i--) 370 (void)ICDR; 371 ICCR0 = ICCR0_ITR | ICCR0_RXE; 372 373 if (i < 0) 374 printk(KERN_ERR "pxa_ir: cannot clear Rx FIFO!\n"); 375 } 376 netif_wake_queue(dev); 377} 378 379/* EIF(Error in FIFO/End in Frame) handler for FIR */ 380static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, int icsr0) 381{ 382 unsigned int len, stat, data; 383 384 /* Get the current data position. */ 385 len = DTADR(si->rxdma) - si->dma_rx_buff_phy; 386 387 do { 388 /* Read Status, and then Data. 
*/ 389 stat = ICSR1; 390 rmb(); 391 data = ICDR; 392 393 if (stat & (ICSR1_CRE | ICSR1_ROR)) { 394 dev->stats.rx_errors++; 395 if (stat & ICSR1_CRE) { 396 printk(KERN_DEBUG "pxa_ir: fir receive CRC error\n"); 397 dev->stats.rx_crc_errors++; 398 } 399 if (stat & ICSR1_ROR) { 400 printk(KERN_DEBUG "pxa_ir: fir receive overrun\n"); 401 dev->stats.rx_over_errors++; 402 } 403 } else { 404 si->dma_rx_buff[len++] = data; 405 } 406 /* If we hit the end of frame, there's no point in continuing. */ 407 if (stat & ICSR1_EOF) 408 break; 409 } while (ICSR0 & ICSR0_EIF); 410 411 if (stat & ICSR1_EOF) { 412 /* end of frame. */ 413 struct sk_buff *skb; 414 415 if (icsr0 & ICSR0_FRE) { 416 printk(KERN_ERR "pxa_ir: dropping erroneous frame\n"); 417 dev->stats.rx_dropped++; 418 return; 419 } 420 421 skb = alloc_skb(len+1,GFP_ATOMIC); 422 if (!skb) { 423 printk(KERN_ERR "pxa_ir: fir out of memory for receive skb\n"); 424 dev->stats.rx_dropped++; 425 return; 426 } 427 428 /* Align IP header to 20 bytes */ 429 skb_reserve(skb, 1); 430 skb_copy_to_linear_data(skb, si->dma_rx_buff, len); 431 skb_put(skb, len); 432 433 /* Feed it to IrLAP */ 434 skb->dev = dev; 435 skb_reset_mac_header(skb); 436 skb->protocol = htons(ETH_P_IRDA); 437 netif_rx(skb); 438 439 dev->stats.rx_packets++; 440 dev->stats.rx_bytes += len; 441 } 442} 443 444/* FIR interrupt handler */ 445static irqreturn_t pxa_irda_fir_irq(int irq, void *dev_id) 446{ 447 struct net_device *dev = dev_id; 448 struct pxa_irda *si = netdev_priv(dev); 449 int icsr0, i = 64; 450 451 /* stop RX DMA */ 452 DCSR(si->rxdma) &= ~DCSR_RUN; 453 si->last_oscr = OSCR; 454 icsr0 = ICSR0; 455 456 if (icsr0 & (ICSR0_FRE | ICSR0_RAB)) { 457 if (icsr0 & ICSR0_FRE) { 458 printk(KERN_DEBUG "pxa_ir: fir receive frame error\n"); 459 dev->stats.rx_frame_errors++; 460 } else { 461 printk(KERN_DEBUG "pxa_ir: fir receive abort\n"); 462 dev->stats.rx_errors++; 463 } 464 ICSR0 = icsr0 & (ICSR0_FRE | ICSR0_RAB); 465 } 466 467 if (icsr0 & ICSR0_EIF) { 468 /* An 
error in FIFO occured, or there is a end of frame */ 469 pxa_irda_fir_irq_eif(si, dev, icsr0); 470 } 471 472 ICCR0 = 0; 473 pxa_irda_fir_dma_rx_start(si); 474 while ((ICSR1 & ICSR1_RNE) && i--) 475 (void)ICDR; 476 ICCR0 = ICCR0_ITR | ICCR0_RXE; 477 478 if (i < 0) 479 printk(KERN_ERR "pxa_ir: cannot clear Rx FIFO!\n"); 480 481 return IRQ_HANDLED; 482} 483 484/* hard_xmit interface of irda device */ 485static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev) 486{ 487 struct pxa_irda *si = netdev_priv(dev); 488 int speed = irda_get_next_speed(skb); 489 490 /* 491 * Does this packet contain a request to change the interface 492 * speed? If so, remember it until we complete the transmission 493 * of this frame. 494 */ 495 if (speed != si->speed && speed != -1) 496 si->newspeed = speed; 497 498 /* 499 * If this is an empty frame, we can bypass a lot. 500 */ 501 if (skb->len == 0) { 502 if (si->newspeed) { 503 si->newspeed = 0; 504 pxa_irda_set_speed(si, speed); 505 } 506 dev_kfree_skb(skb); 507 return NETDEV_TX_OK; 508 } 509 510 netif_stop_queue(dev); 511 512 if (!IS_FIR(si)) { 513 si->tx_buff.data = si->tx_buff.head; 514 si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data, si->tx_buff.truesize); 515 516 /* Disable STUART interrupts and switch to transmit mode. 
*/ 517 STIER = 0; 518 STISR = IrSR_IR_TRANSMIT_ON | IrSR_XMODE_PULSE_1_6; 519 520 /* enable STUART and transmit interrupts */ 521 STIER = IER_UUE | IER_TIE; 522 } else { 523 unsigned long mtt = irda_get_mtt(skb); 524 525 si->dma_tx_buff_len = skb->len; 526 skb_copy_from_linear_data(skb, si->dma_tx_buff, skb->len); 527 528 if (mtt) 529 while ((unsigned)(OSCR - si->last_oscr)/4 < mtt) 530 cpu_relax(); 531 532 /* stop RX DMA, disable FICP */ 533 DCSR(si->rxdma) &= ~DCSR_RUN; 534 ICCR0 = 0; 535 536 pxa_irda_fir_dma_tx_start(si); 537 ICCR0 = ICCR0_ITR | ICCR0_TXE; 538 } 539 540 dev_kfree_skb(skb); 541 dev->trans_start = jiffies; 542 return NETDEV_TX_OK; 543} 544 545static int pxa_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd) 546{ 547 struct if_irda_req *rq = (struct if_irda_req *)ifreq; 548 struct pxa_irda *si = netdev_priv(dev); 549 int ret; 550 551 switch (cmd) { 552 case SIOCSBANDWIDTH: 553 ret = -EPERM; 554 if (capable(CAP_NET_ADMIN)) { 555 /* 556 * We are unable to set the speed if the 557 * device is not running. 558 */ 559 if (netif_running(dev)) { 560 ret = pxa_irda_set_speed(si, 561 rq->ifr_baudrate); 562 } else { 563 printk(KERN_INFO "pxa_ir: SIOCSBANDWIDTH: !netif_running\n"); 564 ret = 0; 565 } 566 } 567 break; 568 569 case SIOCSMEDIABUSY: 570 ret = -EPERM; 571 if (capable(CAP_NET_ADMIN)) { 572 irda_device_set_media_busy(dev, TRUE); 573 ret = 0; 574 } 575 break; 576 577 case SIOCGRECEIVING: 578 ret = 0; 579 rq->ifr_receiving = IS_FIR(si) ? 0 580 : si->rx_buff.state != OUTSIDE_FRAME; 581 break; 582 583 default: 584 ret = -EOPNOTSUPP; 585 break; 586 } 587 588 return ret; 589} 590 591static void pxa_irda_startup(struct pxa_irda *si) 592{ 593 /* Disable STUART interrupts */ 594 STIER = 0; 595 /* enable STUART interrupt to the processor */ 596 STMCR = MCR_OUT2; 597 /* configure SIR frame format: StartBit - Data 7 ... 
Data 0 - Stop Bit */ 598 STLCR = LCR_WLS0 | LCR_WLS1; 599 /* enable FIFO, we use FIFO to improve performance */ 600 STFCR = FCR_TRFIFOE | FCR_ITL_32; 601 602 /* disable FICP */ 603 ICCR0 = 0; 604 /* configure FICP ICCR2 */ 605 ICCR2 = ICCR2_TXP | ICCR2_TRIG_32; 606 607 /* configure DMAC */ 608 DRCMR(17) = si->rxdma | DRCMR_MAPVLD; 609 DRCMR(18) = si->txdma | DRCMR_MAPVLD; 610 611 /* force SIR reinitialization */ 612 si->speed = 4000000; 613 pxa_irda_set_speed(si, 9600); 614 615 printk(KERN_DEBUG "pxa_ir: irda startup\n"); 616} 617 618static void pxa_irda_shutdown(struct pxa_irda *si) 619{ 620 unsigned long flags; 621 622 local_irq_save(flags); 623 624 /* disable STUART and interrupt */ 625 STIER = 0; 626 /* disable STUART SIR mode */ 627 STISR = 0; 628 629 /* disable DMA */ 630 DCSR(si->txdma) &= ~DCSR_RUN; 631 DCSR(si->rxdma) &= ~DCSR_RUN; 632 /* disable FICP */ 633 ICCR0 = 0; 634 635 /* disable the STUART or FICP clocks */ 636 pxa_irda_disable_clk(si); 637 638 DRCMR(17) = 0; 639 DRCMR(18) = 0; 640 641 local_irq_restore(flags); 642 643 /* power off board transceiver */ 644 si->pdata->transceiver_mode(si->dev, IR_OFF); 645 646 printk(KERN_DEBUG "pxa_ir: irda shutdown\n"); 647} 648 649static int pxa_irda_start(struct net_device *dev) 650{ 651 struct pxa_irda *si = netdev_priv(dev); 652 int err; 653 654 si->speed = 9600; 655 656 err = request_irq(IRQ_STUART, pxa_irda_sir_irq, 0, dev->name, dev); 657 if (err) 658 goto err_irq1; 659 660 err = request_irq(IRQ_ICP, pxa_irda_fir_irq, 0, dev->name, dev); 661 if (err) 662 goto err_irq2; 663 664 /* 665 * The interrupt must remain disabled for now. 
666 */ 667 disable_irq(IRQ_STUART); 668 disable_irq(IRQ_ICP); 669 670 err = -EBUSY; 671 si->rxdma = pxa_request_dma("FICP_RX",DMA_PRIO_LOW, pxa_irda_fir_dma_rx_irq, dev); 672 if (si->rxdma < 0) 673 goto err_rx_dma; 674 675 si->txdma = pxa_request_dma("FICP_TX",DMA_PRIO_LOW, pxa_irda_fir_dma_tx_irq, dev); 676 if (si->txdma < 0) 677 goto err_tx_dma; 678 679 err = -ENOMEM; 680 si->dma_rx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, 681 &si->dma_rx_buff_phy, GFP_KERNEL ); 682 if (!si->dma_rx_buff) 683 goto err_dma_rx_buff; 684 685 si->dma_tx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, 686 &si->dma_tx_buff_phy, GFP_KERNEL ); 687 if (!si->dma_tx_buff) 688 goto err_dma_tx_buff; 689 690 /* Setup the serial port for the initial speed. */ 691 pxa_irda_startup(si); 692 693 /* 694 * Open a new IrLAP layer instance. 695 */ 696 si->irlap = irlap_open(dev, &si->qos, "pxa"); 697 err = -ENOMEM; 698 if (!si->irlap) 699 goto err_irlap; 700 701 /* 702 * Now enable the interrupt and start the queue 703 */ 704 enable_irq(IRQ_STUART); 705 enable_irq(IRQ_ICP); 706 netif_start_queue(dev); 707 708 printk(KERN_DEBUG "pxa_ir: irda driver opened\n"); 709 710 return 0; 711 712err_irlap: 713 pxa_irda_shutdown(si); 714 dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy); 715err_dma_tx_buff: 716 dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy); 717err_dma_rx_buff: 718 pxa_free_dma(si->txdma); 719err_tx_dma: 720 pxa_free_dma(si->rxdma); 721err_rx_dma: 722 free_irq(IRQ_ICP, dev); 723err_irq2: 724 free_irq(IRQ_STUART, dev); 725err_irq1: 726 727 return err; 728} 729 730static int pxa_irda_stop(struct net_device *dev) 731{ 732 struct pxa_irda *si = netdev_priv(dev); 733 734 netif_stop_queue(dev); 735 736 pxa_irda_shutdown(si); 737 738 /* Stop IrLAP */ 739 if (si->irlap) { 740 irlap_close(si->irlap); 741 si->irlap = NULL; 742 } 743 744 free_irq(IRQ_STUART, dev); 745 free_irq(IRQ_ICP, dev); 746 747 
pxa_free_dma(si->rxdma); 748 pxa_free_dma(si->txdma); 749 750 if (si->dma_rx_buff) 751 dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy); 752 if (si->dma_tx_buff) 753 dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy); 754 755 printk(KERN_DEBUG "pxa_ir: irda driver closed\n"); 756 return 0; 757} 758 759static int pxa_irda_suspend(struct platform_device *_dev, pm_message_t state) 760{ 761 struct net_device *dev = platform_get_drvdata(_dev); 762 struct pxa_irda *si; 763 764 if (dev && netif_running(dev)) { 765 si = netdev_priv(dev); 766 netif_device_detach(dev); 767 pxa_irda_shutdown(si); 768 } 769 770 return 0; 771} 772 773static int pxa_irda_resume(struct platform_device *_dev) 774{ 775 struct net_device *dev = platform_get_drvdata(_dev); 776 struct pxa_irda *si; 777 778 if (dev && netif_running(dev)) { 779 si = netdev_priv(dev); 780 pxa_irda_startup(si); 781 netif_device_attach(dev); 782 netif_wake_queue(dev); 783 } 784 785 return 0; 786} 787 788 789static int pxa_irda_init_iobuf(iobuff_t *io, int size) 790{ 791 io->head = kmalloc(size, GFP_KERNEL | GFP_DMA); 792 if (io->head != NULL) { 793 io->truesize = size; 794 io->in_frame = FALSE; 795 io->state = OUTSIDE_FRAME; 796 io->data = io->head; 797 } 798 return io->head ? 0 : -ENOMEM; 799} 800 801static const struct net_device_ops pxa_irda_netdev_ops = { 802 .ndo_open = pxa_irda_start, 803 .ndo_stop = pxa_irda_stop, 804 .ndo_start_xmit = pxa_irda_hard_xmit, 805 .ndo_do_ioctl = pxa_irda_ioctl, 806 .ndo_change_mtu = eth_change_mtu, 807 .ndo_validate_addr = eth_validate_addr, 808 .ndo_set_mac_address = eth_mac_addr, 809}; 810 811static int pxa_irda_probe(struct platform_device *pdev) 812{ 813 struct net_device *dev; 814 struct pxa_irda *si; 815 unsigned int baudrate_mask; 816 int err; 817 818 if (!pdev->dev.platform_data) 819 return -ENODEV; 820 821 err = request_mem_region(__PREG(STUART), 0x24, "IrDA") ? 
0 : -EBUSY; 822 if (err) 823 goto err_mem_1; 824 825 err = request_mem_region(__PREG(FICP), 0x1c, "IrDA") ? 0 : -EBUSY; 826 if (err) 827 goto err_mem_2; 828 829 dev = alloc_irdadev(sizeof(struct pxa_irda)); 830 if (!dev) 831 goto err_mem_3; 832 833 si = netdev_priv(dev); 834 si->dev = &pdev->dev; 835 si->pdata = pdev->dev.platform_data; 836 837 si->sir_clk = clk_get(&pdev->dev, "UARTCLK"); 838 si->fir_clk = clk_get(&pdev->dev, "FICPCLK"); 839 if (IS_ERR(si->sir_clk) || IS_ERR(si->fir_clk)) { 840 err = PTR_ERR(IS_ERR(si->sir_clk) ? si->sir_clk : si->fir_clk); 841 goto err_mem_4; 842 } 843 844 /* 845 * Initialise the SIR buffers 846 */ 847 err = pxa_irda_init_iobuf(&si->rx_buff, 14384); 848 if (err) 849 goto err_mem_4; 850 err = pxa_irda_init_iobuf(&si->tx_buff, 4000); 851 if (err) 852 goto err_mem_5; 853 854 if (si->pdata->startup) 855 err = si->pdata->startup(si->dev); 856 if (err) 857 goto err_startup; 858 859 dev->netdev_ops = &pxa_irda_netdev_ops; 860 861 irda_init_max_qos_capabilies(&si->qos); 862 863 baudrate_mask = 0; 864 if (si->pdata->transceiver_cap & IR_SIRMODE) 865 baudrate_mask |= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200; 866 if (si->pdata->transceiver_cap & IR_FIRMODE) 867 baudrate_mask |= IR_4000000 << 8; 868 869 si->qos.baud_rate.bits &= baudrate_mask; 870 si->qos.min_turn_time.bits = 7; /* 1ms or more */ 871 872 irda_qos_bits_to_value(&si->qos); 873 874 err = register_netdev(dev); 875 876 if (err == 0) 877 dev_set_drvdata(&pdev->dev, dev); 878 879 if (err) { 880 if (si->pdata->shutdown) 881 si->pdata->shutdown(si->dev); 882err_startup: 883 kfree(si->tx_buff.head); 884err_mem_5: 885 kfree(si->rx_buff.head); 886err_mem_4: 887 if (si->sir_clk && !IS_ERR(si->sir_clk)) 888 clk_put(si->sir_clk); 889 if (si->fir_clk && !IS_ERR(si->fir_clk)) 890 clk_put(si->fir_clk); 891 free_netdev(dev); 892err_mem_3: 893 release_mem_region(__PREG(FICP), 0x1c); 894err_mem_2: 895 release_mem_region(__PREG(STUART), 0x24); 896 } 897err_mem_1: 898 return err; 899} 900 
/* Remove: unregister the netdev and release every probe-time resource. */
static int pxa_irda_remove(struct platform_device *_dev)
{
	struct net_device *dev = platform_get_drvdata(_dev);

	if (dev) {
		struct pxa_irda *si = netdev_priv(dev);
		unregister_netdev(dev);
		if (si->pdata->shutdown)
			si->pdata->shutdown(si->dev);
		kfree(si->tx_buff.head);
		kfree(si->rx_buff.head);
		clk_put(si->fir_clk);
		clk_put(si->sir_clk);
		free_netdev(dev);
	}

	release_mem_region(__PREG(STUART), 0x24);
	release_mem_region(__PREG(FICP), 0x1c);

	return 0;
}

static struct platform_driver pxa_ir_driver = {
	.driver         = {
		.name   = "pxa2xx-ir",
		.owner  = THIS_MODULE,
	},
	.probe		= pxa_irda_probe,
	.remove		= pxa_irda_remove,
	.suspend	= pxa_irda_suspend,
	.resume		= pxa_irda_resume,
};

static int __init pxa_irda_init(void)
{
	return platform_driver_register(&pxa_ir_driver);
}

static void __exit pxa_irda_exit(void)
{
	platform_driver_unregister(&pxa_ir_driver);
}

module_init(pxa_irda_init);
module_exit(pxa_irda_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-ir");