pxaficp_ir.c revision 5bf3df3f00f507119a26ba0780aa8799e741615c
1/* 2 * linux/drivers/net/irda/pxaficp_ir.c 3 * 4 * Based on sa1100_ir.c by Russell King 5 * 6 * Changes copyright (C) 2003-2005 MontaVista Software, Inc. 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License version 2 as 10 * published by the Free Software Foundation. 11 * 12 * Infra-red driver (SIR/FIR) for the PXA2xx embedded microprocessor 13 * 14 */ 15#include <linux/module.h> 16#include <linux/netdevice.h> 17#include <linux/platform_device.h> 18#include <linux/clk.h> 19 20#include <net/irda/irda.h> 21#include <net/irda/irmod.h> 22#include <net/irda/wrapper.h> 23#include <net/irda/irda_device.h> 24 25#include <mach/dma.h> 26#include <mach/irda.h> 27#include <mach/regs-uart.h> 28#include <mach/regs-ost.h> 29 30#define FICP __REG(0x40800000) /* Start of FICP area */ 31#define ICCR0 __REG(0x40800000) /* ICP Control Register 0 */ 32#define ICCR1 __REG(0x40800004) /* ICP Control Register 1 */ 33#define ICCR2 __REG(0x40800008) /* ICP Control Register 2 */ 34#define ICDR __REG(0x4080000c) /* ICP Data Register */ 35#define ICSR0 __REG(0x40800014) /* ICP Status Register 0 */ 36#define ICSR1 __REG(0x40800018) /* ICP Status Register 1 */ 37 38#define ICCR0_AME (1 << 7) /* Address match enable */ 39#define ICCR0_TIE (1 << 6) /* Transmit FIFO interrupt enable */ 40#define ICCR0_RIE (1 << 5) /* Recieve FIFO interrupt enable */ 41#define ICCR0_RXE (1 << 4) /* Receive enable */ 42#define ICCR0_TXE (1 << 3) /* Transmit enable */ 43#define ICCR0_TUS (1 << 2) /* Transmit FIFO underrun select */ 44#define ICCR0_LBM (1 << 1) /* Loopback mode */ 45#define ICCR0_ITR (1 << 0) /* IrDA transmission */ 46 47#define ICCR2_RXP (1 << 3) /* Receive Pin Polarity select */ 48#define ICCR2_TXP (1 << 2) /* Transmit Pin Polarity select */ 49#define ICCR2_TRIG (3 << 0) /* Receive FIFO Trigger threshold */ 50#define ICCR2_TRIG_8 (0 << 0) /* >= 8 bytes */ 51#define ICCR2_TRIG_16 (1 << 0) /* >= 16 bytes */ 52#define 
ICCR2_TRIG_32 (2 << 0) /* >= 32 bytes */ 53 54#ifdef CONFIG_PXA27x 55#define ICSR0_EOC (1 << 6) /* DMA End of Descriptor Chain */ 56#endif 57#define ICSR0_FRE (1 << 5) /* Framing error */ 58#define ICSR0_RFS (1 << 4) /* Receive FIFO service request */ 59#define ICSR0_TFS (1 << 3) /* Transnit FIFO service request */ 60#define ICSR0_RAB (1 << 2) /* Receiver abort */ 61#define ICSR0_TUR (1 << 1) /* Trunsmit FIFO underun */ 62#define ICSR0_EIF (1 << 0) /* End/Error in FIFO */ 63 64#define ICSR1_ROR (1 << 6) /* Receiver FIFO underrun */ 65#define ICSR1_CRE (1 << 5) /* CRC error */ 66#define ICSR1_EOF (1 << 4) /* End of frame */ 67#define ICSR1_TNF (1 << 3) /* Transmit FIFO not full */ 68#define ICSR1_RNE (1 << 2) /* Receive FIFO not empty */ 69#define ICSR1_TBY (1 << 1) /* Tramsmiter busy flag */ 70#define ICSR1_RSY (1 << 0) /* Recevier synchronized flag */ 71 72#define IrSR_RXPL_NEG_IS_ZERO (1<<4) 73#define IrSR_RXPL_POS_IS_ZERO 0x0 74#define IrSR_TXPL_NEG_IS_ZERO (1<<3) 75#define IrSR_TXPL_POS_IS_ZERO 0x0 76#define IrSR_XMODE_PULSE_1_6 (1<<2) 77#define IrSR_XMODE_PULSE_3_16 0x0 78#define IrSR_RCVEIR_IR_MODE (1<<1) 79#define IrSR_RCVEIR_UART_MODE 0x0 80#define IrSR_XMITIR_IR_MODE (1<<0) 81#define IrSR_XMITIR_UART_MODE 0x0 82 83#define IrSR_IR_RECEIVE_ON (\ 84 IrSR_RXPL_NEG_IS_ZERO | \ 85 IrSR_TXPL_POS_IS_ZERO | \ 86 IrSR_XMODE_PULSE_3_16 | \ 87 IrSR_RCVEIR_IR_MODE | \ 88 IrSR_XMITIR_UART_MODE) 89 90#define IrSR_IR_TRANSMIT_ON (\ 91 IrSR_RXPL_NEG_IS_ZERO | \ 92 IrSR_TXPL_POS_IS_ZERO | \ 93 IrSR_XMODE_PULSE_3_16 | \ 94 IrSR_RCVEIR_UART_MODE | \ 95 IrSR_XMITIR_IR_MODE) 96 97struct pxa_irda { 98 int speed; 99 int newspeed; 100 unsigned long last_oscr; 101 102 unsigned char *dma_rx_buff; 103 unsigned char *dma_tx_buff; 104 dma_addr_t dma_rx_buff_phy; 105 dma_addr_t dma_tx_buff_phy; 106 unsigned int dma_tx_buff_len; 107 int txdma; 108 int rxdma; 109 110 struct irlap_cb *irlap; 111 struct qos_info qos; 112 113 iobuff_t tx_buff; 114 iobuff_t rx_buff; 115 116 struct device 
*dev; 117 struct pxaficp_platform_data *pdata; 118 struct clk *fir_clk; 119 struct clk *sir_clk; 120 struct clk *cur_clk; 121}; 122 123static inline void pxa_irda_disable_clk(struct pxa_irda *si) 124{ 125 if (si->cur_clk) 126 clk_disable(si->cur_clk); 127 si->cur_clk = NULL; 128} 129 130static inline void pxa_irda_enable_firclk(struct pxa_irda *si) 131{ 132 si->cur_clk = si->fir_clk; 133 clk_enable(si->fir_clk); 134} 135 136static inline void pxa_irda_enable_sirclk(struct pxa_irda *si) 137{ 138 si->cur_clk = si->sir_clk; 139 clk_enable(si->sir_clk); 140} 141 142 143#define IS_FIR(si) ((si)->speed >= 4000000) 144#define IRDA_FRAME_SIZE_LIMIT 2047 145 146inline static void pxa_irda_fir_dma_rx_start(struct pxa_irda *si) 147{ 148 DCSR(si->rxdma) = DCSR_NODESC; 149 DSADR(si->rxdma) = __PREG(ICDR); 150 DTADR(si->rxdma) = si->dma_rx_buff_phy; 151 DCMD(si->rxdma) = DCMD_INCTRGADDR | DCMD_FLOWSRC | DCMD_WIDTH1 | DCMD_BURST32 | IRDA_FRAME_SIZE_LIMIT; 152 DCSR(si->rxdma) |= DCSR_RUN; 153} 154 155inline static void pxa_irda_fir_dma_tx_start(struct pxa_irda *si) 156{ 157 DCSR(si->txdma) = DCSR_NODESC; 158 DSADR(si->txdma) = si->dma_tx_buff_phy; 159 DTADR(si->txdma) = __PREG(ICDR); 160 DCMD(si->txdma) = DCMD_INCSRCADDR | DCMD_FLOWTRG | DCMD_ENDIRQEN | DCMD_WIDTH1 | DCMD_BURST32 | si->dma_tx_buff_len; 161 DCSR(si->txdma) |= DCSR_RUN; 162} 163 164/* 165 * Set the IrDA communications speed. 
 */
static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
{
	unsigned long flags;
	unsigned int divisor;

	switch (speed) {
	case 9600:	case 19200:	case 38400:
	case 57600:	case 115200:

		/* refer to PXA250/210 Developer's Manual 10-7 */
		/* BaudRate = 14.7456 MHz / (16*Divisor) */
		divisor = 14745600 / (16 * speed);

		local_irq_save(flags);

		if (IS_FIR(si)) {
			/* leaving FIR mode: stop RX DMA */
			DCSR(si->rxdma) &= ~DCSR_RUN;
			/* disable FICP */
			ICCR0 = 0;
			pxa_irda_disable_clk(si);

			/* set board transceiver to SIR mode */
			si->pdata->transceiver_mode(si->dev, IR_SIRMODE);

			/* enable the STUART clock */
			pxa_irda_enable_sirclk(si);
		}

		/* disable STUART first */
		STIER = 0;

		/* access DLL & DLH (divisor latch) to program the baud rate */
		STLCR |= LCR_DLAB;
		STDLL = divisor & 0xff;
		STDLH = divisor >> 8;
		STLCR &= ~LCR_DLAB;

		si->speed = speed;
		/* IR receive mode, then re-enable STUART + receive interrupts */
		STISR = IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6;
		STIER = IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE;

		local_irq_restore(flags);
		break;

	case 4000000:
		/* only 4 Mbps FIR is supported by the FICP */
		local_irq_save(flags);

		/* disable STUART */
		STIER = 0;
		STISR = 0;
		pxa_irda_disable_clk(si);

		/* disable FICP first */
		ICCR0 = 0;

		/* set board transceiver to FIR mode */
		si->pdata->transceiver_mode(si->dev, IR_FIRMODE);

		/* enable the FICP clock */
		pxa_irda_enable_firclk(si);

		si->speed = speed;
		/* re-arm RX DMA before enabling the receiver */
		pxa_irda_fir_dma_rx_start(si);
		ICCR0 = ICCR0_ITR | ICCR0_RXE;

		local_irq_restore(flags);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/* SIR interrupt service routine.
 */
static irqreturn_t pxa_irda_sir_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct pxa_irda *si = netdev_priv(dev);
	int iir, lsr, data;

	iir = STIIR;

	/* dispatch on the UART interrupt identification field */
	switch (iir & 0x0F) {
	case 0x06: /* Receiver Line Status */
		lsr = STLSR;
		/* drain the FIFO while it reports errored data */
		while (lsr & LSR_FIFOE) {
			data = STRBR;
			if (lsr & (LSR_OE | LSR_PE | LSR_FE | LSR_BI)) {
				printk(KERN_DEBUG "pxa_ir: sir receiving error\n");
				dev->stats.rx_errors++;
				if (lsr & LSR_FE)
					dev->stats.rx_frame_errors++;
				if (lsr & LSR_OE)
					dev->stats.rx_fifo_errors++;
			} else {
				dev->stats.rx_bytes++;
				/* good byte despite FIFO error flag: unwrap it */
				async_unwrap_char(dev, &dev->stats,
						  &si->rx_buff, data);
			}
			lsr = STLSR;
		}
		si->last_oscr = OSCR;
		break;

	case 0x04: /* Received Data Available */
		/* fall through */

	case 0x0C: /* Character Timeout Indication */
		do {
			dev->stats.rx_bytes++;
			async_unwrap_char(dev, &dev->stats, &si->rx_buff, STRBR);
		} while (STLSR & LSR_DR);
		si->last_oscr = OSCR;
		break;

	case 0x02: /* Transmit FIFO Data Request */
		/* refill the TX FIFO from the wrapped frame buffer */
		while ((si->tx_buff.len) && (STLSR & LSR_TDRQ)) {
			STTHR = *si->tx_buff.data++;
			si->tx_buff.len -= 1;
		}

		if (si->tx_buff.len == 0) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += si->tx_buff.data - si->tx_buff.head;

			/* We need to ensure that the transmitter has finished. */
			while ((STLSR & LSR_TEMT) == 0)
				cpu_relax();
			si->last_oscr = OSCR;

			/*
			 * Ok, we've finished transmitting.  Now enable
			 * the receiver.  Sometimes we get a receive IRQ
			 * immediately after a transmit...
			 */
			if (si->newspeed) {
				/* apply the speed change deferred by hard_xmit */
				pxa_irda_set_speed(si, si->newspeed);
				si->newspeed = 0;
			} else {
				/* enable IR Receiver, disable IR Transmitter */
				STISR = IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6;
				/* enable STUART and receive interrupts */
				STIER = IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE;
			}
			/* I'm hungry!
 */
			netif_wake_queue(dev);
		}
		break;
	}

	return IRQ_HANDLED;
}

/* FIR Receive DMA interrupt handler */
static void pxa_irda_fir_dma_rx_irq(int channel, void *data)
{
	int dcsr = DCSR(channel);

	DCSR(channel) = dcsr & ~DCSR_RUN;

	/* normal RX completion is handled by the FICP interrupt; this
	 * handler only reports the DMA channel status as an error */
	printk(KERN_DEBUG "pxa_ir: fir rx dma bus error %#x\n", dcsr);
}

/* FIR Transmit DMA interrupt handler */
static void pxa_irda_fir_dma_tx_irq(int channel, void *data)
{
	struct net_device *dev = data;
	struct pxa_irda *si = netdev_priv(dev);
	int dcsr;

	dcsr = DCSR(channel);
	DCSR(channel) = dcsr & ~DCSR_RUN;

	/* DCSR_ENDINTR means the descriptor completed; anything else is
	 * a transmit error */
	if (dcsr & DCSR_ENDINTR) {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += si->dma_tx_buff_len;
	} else {
		dev->stats.tx_errors++;
	}

	/* wait for the FICP to drain its FIFO onto the wire */
	while (ICSR1 & ICSR1_TBY)
		cpu_relax();
	si->last_oscr = OSCR;

	/*
	 * HACK: It looks like the TBY bit is dropped too soon.
	 * Without this delay things break.
	 */
	udelay(120);

	if (si->newspeed) {
		/* apply the speed change deferred by hard_xmit */
		pxa_irda_set_speed(si, si->newspeed);
		si->newspeed = 0;
	} else {
		int i = 64;

		/* switch back to receive: re-arm RX DMA and drain any
		 * stale bytes left in the receive FIFO (bounded retry) */
		ICCR0 = 0;
		pxa_irda_fir_dma_rx_start(si);
		while ((ICSR1 & ICSR1_RNE) && i--)
			(void)ICDR;
		ICCR0 = ICCR0_ITR | ICCR0_RXE;

		if (i < 0)
			printk(KERN_ERR "pxa_ir: cannot clear Rx FIFO!\n");
	}
	netif_wake_queue(dev);
}

/* EIF(Error in FIFO/End in Frame) handler for FIR */
static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, int icsr0)
{
	unsigned int len, stat, data;

	/* Get the current data position (bytes already moved by RX DMA). */
	len = DTADR(si->rxdma) - si->dma_rx_buff_phy;

	do {
		/* Read Status, and then Data.
 */
		stat = ICSR1;
		rmb();	/* status must be sampled before the data byte */
		data = ICDR;

		if (stat & (ICSR1_CRE | ICSR1_ROR)) {
			dev->stats.rx_errors++;
			if (stat & ICSR1_CRE) {
				printk(KERN_DEBUG "pxa_ir: fir receive CRC error\n");
				dev->stats.rx_crc_errors++;
			}
			if (stat & ICSR1_ROR) {
				printk(KERN_DEBUG "pxa_ir: fir receive overrun\n");
				dev->stats.rx_over_errors++;
			}
		} else {
			/* good byte: append behind what RX DMA delivered */
			si->dma_rx_buff[len++] = data;
		}
		/* If we hit the end of frame, there's no point in continuing. */
		if (stat & ICSR1_EOF)
			break;
	} while (ICSR0 & ICSR0_EIF);

	if (stat & ICSR1_EOF) {
		/* end of frame: hand the accumulated bytes up the stack */
		struct sk_buff *skb;

		if (icsr0 & ICSR0_FRE) {
			printk(KERN_ERR "pxa_ir: dropping erroneous frame\n");
			dev->stats.rx_dropped++;
			return;
		}

		/* +1 for the alignment byte reserved below */
		skb = alloc_skb(len+1,GFP_ATOMIC);
		if (!skb) {
			printk(KERN_ERR "pxa_ir: fir out of memory for receive skb\n");
			dev->stats.rx_dropped++;
			return;
		}

		/* Align IP header to 20 bytes */
		skb_reserve(skb, 1);
		skb_copy_to_linear_data(skb, si->dma_rx_buff, len);
		skb_put(skb, len);

		/* Feed it to IrLAP */
		skb->dev = dev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
	}
}

/* FIR interrupt handler */
static irqreturn_t pxa_irda_fir_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct pxa_irda *si = netdev_priv(dev);
	int icsr0, i = 64;

	/* stop RX DMA */
	DCSR(si->rxdma) &= ~DCSR_RUN;
	si->last_oscr = OSCR;
	icsr0 = ICSR0;

	if (icsr0 & (ICSR0_FRE | ICSR0_RAB)) {
		if (icsr0 & ICSR0_FRE) {
			printk(KERN_DEBUG "pxa_ir: fir receive frame error\n");
			dev->stats.rx_frame_errors++;
		} else {
			printk(KERN_DEBUG "pxa_ir: fir receive abort\n");
			dev->stats.rx_errors++;
		}
		/* acknowledge the error condition by writing the bits back */
		ICSR0 = icsr0 & (ICSR0_FRE | ICSR0_RAB);
	}

	if (icsr0 & ICSR0_EIF) {
		/* An
error in FIFO occurred, or there is an end of frame */
		pxa_irda_fir_irq_eif(si, dev, icsr0);
	}

	/* restart the receiver: re-arm RX DMA and drain any stale bytes
	 * left in the receive FIFO (bounded retry) */
	ICCR0 = 0;
	pxa_irda_fir_dma_rx_start(si);
	while ((ICSR1 & ICSR1_RNE) && i--)
		(void)ICDR;
	ICCR0 = ICCR0_ITR | ICCR0_RXE;

	if (i < 0)
		printk(KERN_ERR "pxa_ir: cannot clear Rx FIFO!\n");

	return IRQ_HANDLED;
}

/* hard_xmit interface of irda device */
static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pxa_irda *si = netdev_priv(dev);
	int speed = irda_get_next_speed(skb);

	/*
	 * Does this packet contain a request to change the interface
	 * speed?  If so, remember it until we complete the transmission
	 * of this frame.
	 */
	if (speed != si->speed && speed != -1)
		si->newspeed = speed;

	/*
	 * If this is an empty frame, we can bypass a lot: the speed
	 * change (if any) is applied immediately since there is no
	 * payload to transmit first.
	 */
	if (skb->len == 0) {
		if (si->newspeed) {
			si->newspeed = 0;
			pxa_irda_set_speed(si, speed);
		}
		dev_kfree_skb(skb);
		return 0;
	}

	netif_stop_queue(dev);

	if (!IS_FIR(si)) {
		/* SIR: async-wrap the frame; the TX IRQ drains tx_buff */
		si->tx_buff.data = si->tx_buff.head;
		si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data, si->tx_buff.truesize);

		/* Disable STUART interrupts and switch to transmit mode.
 */
		STIER = 0;
		STISR = IrSR_IR_TRANSMIT_ON | IrSR_XMODE_PULSE_1_6;

		/* enable STUART and transmit interrupts */
		STIER = IER_UUE | IER_TIE;
	} else {
		/* FIR: copy into the DMA bounce buffer and kick TX DMA */
		unsigned long mtt = irda_get_mtt(skb);

		si->dma_tx_buff_len = skb->len;
		skb_copy_from_linear_data(skb, si->dma_tx_buff, skb->len);

		/* honour the minimum turnaround time; busy-wait on OSCR
		 * (NOTE(review): the /4 presumably converts OSCR ticks to
		 * microseconds -- confirm against the OS timer clock rate) */
		if (mtt)
			while ((unsigned)(OSCR - si->last_oscr)/4 < mtt)
				cpu_relax();

		/* stop RX DMA, disable FICP */
		DCSR(si->rxdma) &= ~DCSR_RUN;
		ICCR0 = 0;

		pxa_irda_fir_dma_tx_start(si);
		ICCR0 = ICCR0_ITR | ICCR0_TXE;
	}

	dev_kfree_skb(skb);
	dev->trans_start = jiffies;
	return 0;
}

/* ioctl handler for IrDA-specific requests (speed, media-busy, RX state). */
static int pxa_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
{
	struct if_irda_req *rq = (struct if_irda_req *)ifreq;
	struct pxa_irda *si = netdev_priv(dev);
	int ret;

	switch (cmd) {
	case SIOCSBANDWIDTH:
		ret = -EPERM;
		if (capable(CAP_NET_ADMIN)) {
			/*
			 * We are unable to set the speed if the
			 * device is not running.
			 */
			if (netif_running(dev)) {
				ret = pxa_irda_set_speed(si,
						rq->ifr_baudrate);
			} else {
				printk(KERN_INFO "pxa_ir: SIOCSBANDWIDTH: !netif_running\n");
				ret = 0;
			}
		}
		break;

	case SIOCSMEDIABUSY:
		ret = -EPERM;
		if (capable(CAP_NET_ADMIN)) {
			irda_device_set_media_busy(dev, TRUE);
			ret = 0;
		}
		break;

	case SIOCGRECEIVING:
		/* in FIR mode reception is DMA-driven, so report idle */
		ret = 0;
		rq->ifr_receiving = IS_FIR(si) ? 0
					: si->rx_buff.state != OUTSIDE_FRAME;
		break;

	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/* Bring STUART, FICP and the DMA mapping registers into a known state
 * and force a SIR re-initialization at 9600 baud. */
static void pxa_irda_startup(struct pxa_irda *si)
{
	/* Disable STUART interrupts */
	STIER = 0;
	/* enable STUART interrupt to the processor */
	STMCR = MCR_OUT2;
	/* configure SIR frame format: StartBit - Data 7 ...
Data 0 - Stop Bit */
	STLCR = LCR_WLS0 | LCR_WLS1;
	/* enable FIFO, we use FIFO to improve performance */
	STFCR = FCR_TRFIFOE | FCR_ITL_32;

	/* disable FICP */
	ICCR0 = 0;
	/* configure FICP ICCR2: TX pin polarity and 32-byte RX trigger */
	ICCR2 = ICCR2_TXP | ICCR2_TRIG_32;

	/* configure DMAC: map ICP request lines to our channels */
	DRCMR(17) = si->rxdma | DRCMR_MAPVLD;
	DRCMR(18) = si->txdma | DRCMR_MAPVLD;

	/* force SIR reinitialization: pretend we were in FIR so that
	 * set_speed performs the full FIR->SIR switch */
	si->speed = 4000000;
	pxa_irda_set_speed(si, 9600);

	printk(KERN_DEBUG "pxa_ir: irda startup\n");
}

/* Quiesce UART, DMA and FICP, gate the clock, and power the board
 * transceiver off. */
static void pxa_irda_shutdown(struct pxa_irda *si)
{
	unsigned long flags;

	local_irq_save(flags);

	/* disable STUART and interrupt */
	STIER = 0;
	/* disable STUART SIR mode */
	STISR = 0;

	/* disable DMA */
	DCSR(si->txdma) &= ~DCSR_RUN;
	DCSR(si->rxdma) &= ~DCSR_RUN;
	/* disable FICP */
	ICCR0 = 0;

	/* disable the STUART or FICP clocks */
	pxa_irda_disable_clk(si);

	/* unmap the DMA request lines */
	DRCMR(17) = 0;
	DRCMR(18) = 0;

	local_irq_restore(flags);

	/* power off board transceiver */
	si->pdata->transceiver_mode(si->dev, IR_OFF);

	printk(KERN_DEBUG "pxa_ir: irda shutdown\n");
}

/* Network device open: claim IRQs, DMA channels and buffers, bring the
 * hardware up and open an IrLAP instance. */
static int pxa_irda_start(struct net_device *dev)
{
	struct pxa_irda *si = netdev_priv(dev);
	int err;

	si->speed = 9600;

	err = request_irq(IRQ_STUART, pxa_irda_sir_irq, 0, dev->name, dev);
	if (err)
		goto err_irq1;

	err = request_irq(IRQ_ICP, pxa_irda_fir_irq, 0, dev->name, dev);
	if (err)
		goto err_irq2;

	/*
	 * The interrupt must remain disabled for now.
 */
	disable_irq(IRQ_STUART);
	disable_irq(IRQ_ICP);

	err = -EBUSY;
	si->rxdma = pxa_request_dma("FICP_RX",DMA_PRIO_LOW, pxa_irda_fir_dma_rx_irq, dev);
	if (si->rxdma < 0)
		goto err_rx_dma;

	si->txdma = pxa_request_dma("FICP_TX",DMA_PRIO_LOW, pxa_irda_fir_dma_tx_irq, dev);
	if (si->txdma < 0)
		goto err_tx_dma;

	err = -ENOMEM;
	si->dma_rx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
					     &si->dma_rx_buff_phy, GFP_KERNEL );
	if (!si->dma_rx_buff)
		goto err_dma_rx_buff;

	si->dma_tx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
					     &si->dma_tx_buff_phy, GFP_KERNEL );
	if (!si->dma_tx_buff)
		goto err_dma_tx_buff;

	/* Setup the serial port for the initial speed. */
	pxa_irda_startup(si);

	/*
	 * Open a new IrLAP layer instance.
	 */
	si->irlap = irlap_open(dev, &si->qos, "pxa");
	err = -ENOMEM;
	if (!si->irlap)
		goto err_irlap;

	/*
	 * Now enable the interrupt and start the queue
	 */
	enable_irq(IRQ_STUART);
	enable_irq(IRQ_ICP);
	netif_start_queue(dev);

	printk(KERN_DEBUG "pxa_ir: irda driver opened\n");

	return 0;

/* unwind in strict reverse order of acquisition */
err_irlap:
	pxa_irda_shutdown(si);
	dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy);
err_dma_tx_buff:
	dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy);
err_dma_rx_buff:
	pxa_free_dma(si->txdma);
err_tx_dma:
	pxa_free_dma(si->rxdma);
err_rx_dma:
	free_irq(IRQ_ICP, dev);
err_irq2:
	free_irq(IRQ_STUART, dev);
err_irq1:

	return err;
}

/* Network device stop: shut the hardware down, close IrLAP and release
 * IRQs; DMA channels and buffers are released below. */
static int pxa_irda_stop(struct net_device *dev)
{
	struct pxa_irda *si = netdev_priv(dev);

	netif_stop_queue(dev);

	pxa_irda_shutdown(si);

	/* Stop IrLAP */
	if (si->irlap) {
		irlap_close(si->irlap);
		si->irlap = NULL;
	}

	free_irq(IRQ_STUART, dev);
	free_irq(IRQ_ICP, dev);

pxa_free_dma(si->rxdma); 747 pxa_free_dma(si->txdma); 748 749 if (si->dma_rx_buff) 750 dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy); 751 if (si->dma_tx_buff) 752 dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy); 753 754 printk(KERN_DEBUG "pxa_ir: irda driver closed\n"); 755 return 0; 756} 757 758static int pxa_irda_suspend(struct platform_device *_dev, pm_message_t state) 759{ 760 struct net_device *dev = platform_get_drvdata(_dev); 761 struct pxa_irda *si; 762 763 if (dev && netif_running(dev)) { 764 si = netdev_priv(dev); 765 netif_device_detach(dev); 766 pxa_irda_shutdown(si); 767 } 768 769 return 0; 770} 771 772static int pxa_irda_resume(struct platform_device *_dev) 773{ 774 struct net_device *dev = platform_get_drvdata(_dev); 775 struct pxa_irda *si; 776 777 if (dev && netif_running(dev)) { 778 si = netdev_priv(dev); 779 pxa_irda_startup(si); 780 netif_device_attach(dev); 781 netif_wake_queue(dev); 782 } 783 784 return 0; 785} 786 787 788static int pxa_irda_init_iobuf(iobuff_t *io, int size) 789{ 790 io->head = kmalloc(size, GFP_KERNEL | GFP_DMA); 791 if (io->head != NULL) { 792 io->truesize = size; 793 io->in_frame = FALSE; 794 io->state = OUTSIDE_FRAME; 795 io->data = io->head; 796 } 797 return io->head ? 0 : -ENOMEM; 798} 799 800static int pxa_irda_probe(struct platform_device *pdev) 801{ 802 struct net_device *dev; 803 struct pxa_irda *si; 804 unsigned int baudrate_mask; 805 int err; 806 807 if (!pdev->dev.platform_data) 808 return -ENODEV; 809 810 err = request_mem_region(__PREG(STUART), 0x24, "IrDA") ? 0 : -EBUSY; 811 if (err) 812 goto err_mem_1; 813 814 err = request_mem_region(__PREG(FICP), 0x1c, "IrDA") ? 
0 : -EBUSY; 815 if (err) 816 goto err_mem_2; 817 818 dev = alloc_irdadev(sizeof(struct pxa_irda)); 819 if (!dev) 820 goto err_mem_3; 821 822 si = netdev_priv(dev); 823 si->dev = &pdev->dev; 824 si->pdata = pdev->dev.platform_data; 825 826 si->sir_clk = clk_get(&pdev->dev, "UARTCLK"); 827 si->fir_clk = clk_get(&pdev->dev, "FICPCLK"); 828 if (IS_ERR(si->sir_clk) || IS_ERR(si->fir_clk)) { 829 err = PTR_ERR(IS_ERR(si->sir_clk) ? si->sir_clk : si->fir_clk); 830 goto err_mem_4; 831 } 832 833 /* 834 * Initialise the SIR buffers 835 */ 836 err = pxa_irda_init_iobuf(&si->rx_buff, 14384); 837 if (err) 838 goto err_mem_4; 839 err = pxa_irda_init_iobuf(&si->tx_buff, 4000); 840 if (err) 841 goto err_mem_5; 842 843 if (si->pdata->startup) 844 err = si->pdata->startup(si->dev); 845 if (err) 846 goto err_startup; 847 848 dev->hard_start_xmit = pxa_irda_hard_xmit; 849 dev->open = pxa_irda_start; 850 dev->stop = pxa_irda_stop; 851 dev->do_ioctl = pxa_irda_ioctl; 852 853 irda_init_max_qos_capabilies(&si->qos); 854 855 baudrate_mask = 0; 856 if (si->pdata->transceiver_cap & IR_SIRMODE) 857 baudrate_mask |= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200; 858 if (si->pdata->transceiver_cap & IR_FIRMODE) 859 baudrate_mask |= IR_4000000 << 8; 860 861 si->qos.baud_rate.bits &= baudrate_mask; 862 si->qos.min_turn_time.bits = 7; /* 1ms or more */ 863 864 irda_qos_bits_to_value(&si->qos); 865 866 err = register_netdev(dev); 867 868 if (err == 0) 869 dev_set_drvdata(&pdev->dev, dev); 870 871 if (err) { 872 if (si->pdata->shutdown) 873 si->pdata->shutdown(si->dev); 874err_startup: 875 kfree(si->tx_buff.head); 876err_mem_5: 877 kfree(si->rx_buff.head); 878err_mem_4: 879 if (si->sir_clk && !IS_ERR(si->sir_clk)) 880 clk_put(si->sir_clk); 881 if (si->fir_clk && !IS_ERR(si->fir_clk)) 882 clk_put(si->fir_clk); 883 free_netdev(dev); 884err_mem_3: 885 release_mem_region(__PREG(FICP), 0x1c); 886err_mem_2: 887 release_mem_region(__PREG(STUART), 0x24); 888 } 889err_mem_1: 890 return err; 891} 892 893static 
int pxa_irda_remove(struct platform_device *_dev) 894{ 895 struct net_device *dev = platform_get_drvdata(_dev); 896 897 if (dev) { 898 struct pxa_irda *si = netdev_priv(dev); 899 unregister_netdev(dev); 900 if (si->pdata->shutdown) 901 si->pdata->shutdown(si->dev); 902 kfree(si->tx_buff.head); 903 kfree(si->rx_buff.head); 904 clk_put(si->fir_clk); 905 clk_put(si->sir_clk); 906 free_netdev(dev); 907 } 908 909 release_mem_region(__PREG(STUART), 0x24); 910 release_mem_region(__PREG(FICP), 0x1c); 911 912 return 0; 913} 914 915static struct platform_driver pxa_ir_driver = { 916 .driver = { 917 .name = "pxa2xx-ir", 918 .owner = THIS_MODULE, 919 }, 920 .probe = pxa_irda_probe, 921 .remove = pxa_irda_remove, 922 .suspend = pxa_irda_suspend, 923 .resume = pxa_irda_resume, 924}; 925 926static int __init pxa_irda_init(void) 927{ 928 return platform_driver_register(&pxa_ir_driver); 929} 930 931static void __exit pxa_irda_exit(void) 932{ 933 platform_driver_unregister(&pxa_ir_driver); 934} 935 936module_init(pxa_irda_init); 937module_exit(pxa_irda_exit); 938 939MODULE_LICENSE("GPL"); 940MODULE_ALIAS("platform:pxa2xx-ir"); 941