/* pxaficp_ir.c - revision a6b7a407865aab9f849dd99a71072b7cd1175116 */
1/*
2 * linux/drivers/net/irda/pxaficp_ir.c
3 *
4 * Based on sa1100_ir.c by Russell King
5 *
6 * Changes copyright (C) 2003-2005 MontaVista Software, Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * Infra-red driver (SIR/FIR) for the PXA2xx embedded microprocessor
13 *
14 */
15#include <linux/interrupt.h>
16#include <linux/module.h>
17#include <linux/netdevice.h>
18#include <linux/etherdevice.h>
19#include <linux/platform_device.h>
20#include <linux/clk.h>
21#include <linux/gpio.h>
22#include <linux/slab.h>
23
24#include <net/irda/irda.h>
25#include <net/irda/irmod.h>
26#include <net/irda/wrapper.h>
27#include <net/irda/irda_device.h>
28
29#include <mach/dma.h>
30#include <mach/irda.h>
31#include <mach/regs-uart.h>
32#include <mach/regs-ost.h>
33
34#define FICP		__REG(0x40800000)  /* Start of FICP area */
35#define ICCR0		__REG(0x40800000)  /* ICP Control Register 0 */
36#define ICCR1		__REG(0x40800004)  /* ICP Control Register 1 */
37#define ICCR2		__REG(0x40800008)  /* ICP Control Register 2 */
38#define ICDR		__REG(0x4080000c)  /* ICP Data Register */
39#define ICSR0		__REG(0x40800014)  /* ICP Status Register 0 */
40#define ICSR1		__REG(0x40800018)  /* ICP Status Register 1 */
41
42#define ICCR0_AME	(1 << 7)	/* Address match enable */
43#define ICCR0_TIE	(1 << 6)	/* Transmit FIFO interrupt enable */
44#define ICCR0_RIE	(1 << 5)	/* Receive FIFO interrupt enable */
45#define ICCR0_RXE	(1 << 4)	/* Receive enable */
46#define ICCR0_TXE	(1 << 3)	/* Transmit enable */
47#define ICCR0_TUS	(1 << 2)	/* Transmit FIFO underrun select */
48#define ICCR0_LBM	(1 << 1)	/* Loopback mode */
49#define ICCR0_ITR	(1 << 0)	/* IrDA transmission */
50
51#define ICCR2_RXP       (1 << 3)	/* Receive Pin Polarity select */
52#define ICCR2_TXP       (1 << 2)	/* Transmit Pin Polarity select */
53#define ICCR2_TRIG	(3 << 0)	/* Receive FIFO Trigger threshold */
54#define ICCR2_TRIG_8    (0 << 0)	/* 	>= 8 bytes */
55#define ICCR2_TRIG_16   (1 << 0)	/*	>= 16 bytes */
56#define ICCR2_TRIG_32   (2 << 0)	/*	>= 32 bytes */
57
58#ifdef CONFIG_PXA27x
59#define ICSR0_EOC	(1 << 6)	/* DMA End of Descriptor Chain */
60#endif
61#define ICSR0_FRE	(1 << 5)	/* Framing error */
62#define ICSR0_RFS	(1 << 4)	/* Receive FIFO service request */
#define ICSR0_TFS	(1 << 3)	/* Transmit FIFO service request */
64#define ICSR0_RAB	(1 << 2)	/* Receiver abort */
#define ICSR0_TUR	(1 << 1)	/* Transmit FIFO underrun */
66#define ICSR0_EIF	(1 << 0)	/* End/Error in FIFO */
67
#define ICSR1_ROR	(1 << 6)	/* Receive FIFO overrun  */
69#define ICSR1_CRE	(1 << 5)	/* CRC error */
70#define ICSR1_EOF	(1 << 4)	/* End of frame */
71#define ICSR1_TNF	(1 << 3)	/* Transmit FIFO not full */
72#define ICSR1_RNE	(1 << 2)	/* Receive FIFO not empty */
#define ICSR1_TBY	(1 << 1)	/* Transmitter busy flag */
#define ICSR1_RSY	(1 << 0)	/* Receiver synchronized flag */
75
76#define IrSR_RXPL_NEG_IS_ZERO (1<<4)
77#define IrSR_RXPL_POS_IS_ZERO 0x0
78#define IrSR_TXPL_NEG_IS_ZERO (1<<3)
79#define IrSR_TXPL_POS_IS_ZERO 0x0
80#define IrSR_XMODE_PULSE_1_6  (1<<2)
81#define IrSR_XMODE_PULSE_3_16 0x0
82#define IrSR_RCVEIR_IR_MODE   (1<<1)
83#define IrSR_RCVEIR_UART_MODE 0x0
84#define IrSR_XMITIR_IR_MODE   (1<<0)
85#define IrSR_XMITIR_UART_MODE 0x0
86
87#define IrSR_IR_RECEIVE_ON (\
88                IrSR_RXPL_NEG_IS_ZERO | \
89                IrSR_TXPL_POS_IS_ZERO | \
90                IrSR_XMODE_PULSE_3_16 | \
91                IrSR_RCVEIR_IR_MODE   | \
92                IrSR_XMITIR_UART_MODE)
93
94#define IrSR_IR_TRANSMIT_ON (\
95                IrSR_RXPL_NEG_IS_ZERO | \
96                IrSR_TXPL_POS_IS_ZERO | \
97                IrSR_XMODE_PULSE_3_16 | \
98                IrSR_RCVEIR_UART_MODE | \
99                IrSR_XMITIR_IR_MODE)
100
/* Per-device driver state, stored in the net_device private area. */
struct pxa_irda {
	int			speed;		/* current link speed in bps */
	int			newspeed;	/* speed to apply once the in-flight TX completes (0 = none pending) */
	unsigned long		last_oscr;	/* OSCR snapshot of last TX/RX activity, used for mtt delays */

	unsigned char		*dma_rx_buff;	/* FIR RX DMA buffer (CPU virtual address) */
	unsigned char		*dma_tx_buff;	/* FIR TX DMA buffer (CPU virtual address) */
	dma_addr_t		dma_rx_buff_phy;	/* FIR RX DMA buffer (bus address) */
	dma_addr_t		dma_tx_buff_phy;	/* FIR TX DMA buffer (bus address) */
	unsigned int		dma_tx_buff_len;	/* length of the frame staged in dma_tx_buff */
	int			txdma;		/* PXA DMA channel number for FIR TX */
	int			rxdma;		/* PXA DMA channel number for FIR RX */

	struct irlap_cb		*irlap;		/* IrLAP layer instance */
	struct qos_info		qos;		/* advertised/negotiated QoS parameters */

	iobuff_t		tx_buff;	/* SIR async-wrap transmit buffer */
	iobuff_t		rx_buff;	/* SIR async-unwrap receive buffer */

	struct device		*dev;
	struct pxaficp_platform_data *pdata;
	struct clk		*fir_clk;	/* FICP clock (4 Mbps mode) */
	struct clk		*sir_clk;	/* STUART clock (SIR rates) */
	struct clk		*cur_clk;	/* whichever of the above is enabled, or NULL */
};
126
127static inline void pxa_irda_disable_clk(struct pxa_irda *si)
128{
129	if (si->cur_clk)
130		clk_disable(si->cur_clk);
131	si->cur_clk = NULL;
132}
133
134static inline void pxa_irda_enable_firclk(struct pxa_irda *si)
135{
136	si->cur_clk = si->fir_clk;
137	clk_enable(si->fir_clk);
138}
139
140static inline void pxa_irda_enable_sirclk(struct pxa_irda *si)
141{
142	si->cur_clk = si->sir_clk;
143	clk_enable(si->sir_clk);
144}
145
146
147#define IS_FIR(si)		((si)->speed >= 4000000)
148#define IRDA_FRAME_SIZE_LIMIT	2047
149
150inline static void pxa_irda_fir_dma_rx_start(struct pxa_irda *si)
151{
152	DCSR(si->rxdma)  = DCSR_NODESC;
153	DSADR(si->rxdma) = __PREG(ICDR);
154	DTADR(si->rxdma) = si->dma_rx_buff_phy;
155	DCMD(si->rxdma) = DCMD_INCTRGADDR | DCMD_FLOWSRC |  DCMD_WIDTH1 | DCMD_BURST32 | IRDA_FRAME_SIZE_LIMIT;
156	DCSR(si->rxdma) |= DCSR_RUN;
157}
158
159inline static void pxa_irda_fir_dma_tx_start(struct pxa_irda *si)
160{
161	DCSR(si->txdma)  = DCSR_NODESC;
162	DSADR(si->txdma) = si->dma_tx_buff_phy;
163	DTADR(si->txdma) = __PREG(ICDR);
164	DCMD(si->txdma) = DCMD_INCSRCADDR | DCMD_FLOWTRG |  DCMD_ENDIRQEN | DCMD_WIDTH1 | DCMD_BURST32 | si->dma_tx_buff_len;
165	DCSR(si->txdma) |= DCSR_RUN;
166}
167
168/*
169 * Set the IrDA communications mode.
170 */
/*
 * Set the IrDA communications mode (SIR/FIR/off) on the board transceiver.
 *
 * A board-supplied transceiver_mode() hook takes full control if present;
 * otherwise drive the optional power-down GPIO and fall back to the
 * generic pxa2xx transceiver handling.
 */
static void pxa_irda_set_mode(struct pxa_irda *si, int mode)
{
	if (si->pdata->transceiver_mode)
		si->pdata->transceiver_mode(si->dev, mode);
	else {
		if (gpio_is_valid(si->pdata->gpio_pwdown))
			/*
			 * Powered on when mode is not IR_OFF; the XOR with
			 * gpio_pwdown_inverted flips the GPIO's active level.
			 */
			gpio_set_value(si->pdata->gpio_pwdown,
					!(mode & IR_OFF) ^
					!si->pdata->gpio_pwdown_inverted);
		pxa2xx_transceiver_mode(si->dev, mode);
	}
}
183
184/*
185 * Set the IrDA communications speed.
186 */
/*
 * Set the IrDA communications speed.
 *
 * SIR rates (9600..115200 baud) run on the STUART; 4 Mbps runs on the
 * FICP with DMA.  Switching between the two blocks must follow a strict
 * sequence (stop DMA, disable the old block, swap clocks and transceiver
 * mode, then enable the new block), all with interrupts disabled.
 *
 * Returns 0 on success or -EINVAL for an unsupported rate.
 */
static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
{
	unsigned long flags;
	unsigned int divisor;

	switch (speed) {
	case 9600:	case 19200:	case 38400:
	case 57600:	case 115200:

		/* refer to PXA250/210 Developer's Manual 10-7 */
		/*  BaudRate = 14.7456 MHz / (16*Divisor) */
		divisor = 14745600 / (16 * speed);

		local_irq_save(flags);

		if (IS_FIR(si)) {
			/* coming from FIR: stop RX DMA */
			DCSR(si->rxdma) &= ~DCSR_RUN;
			/* disable FICP */
			ICCR0 = 0;
			pxa_irda_disable_clk(si);

			/* set board transceiver to SIR mode */
			pxa_irda_set_mode(si, IR_SIRMODE);

			/* enable the STUART clock */
			pxa_irda_enable_sirclk(si);
		}

		/* disable STUART first */
		STIER = 0;

		/* access DLL & DLH: program the baud divisor latch */
		STLCR |= LCR_DLAB;
		STDLL = divisor & 0xff;
		STDLH = divisor >> 8;
		STLCR &= ~LCR_DLAB;

		si->speed = speed;
		/* IR receive mode on, then re-enable UART + RX interrupts */
		STISR = IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6;
		STIER = IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE;

		local_irq_restore(flags);
		break;

	case 4000000:
		local_irq_save(flags);

		/* disable STUART */
		STIER = 0;
		STISR = 0;
		pxa_irda_disable_clk(si);

		/* disable FICP first */
		ICCR0 = 0;

		/* set board transceiver to FIR mode */
		pxa_irda_set_mode(si, IR_FIRMODE);

		/* enable the FICP clock */
		pxa_irda_enable_firclk(si);

		si->speed = speed;
		/* arm RX DMA before enabling the receiver */
		pxa_irda_fir_dma_rx_start(si);
		ICCR0 = ICCR0_ITR | ICCR0_RXE;

		local_irq_restore(flags);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
262
263/* SIR interrupt service routine. */
/*
 * SIR interrupt service routine (STUART).
 *
 * Dispatches on the STUART interrupt ID: line-status errors, received
 * data, character timeout, and transmit FIFO requests.  RX bytes are fed
 * through the IrDA async-unwrap state machine; TX drains tx_buff into
 * the FIFO and, on completion, applies any pending speed change or
 * switches back to receive mode.
 */
static irqreturn_t pxa_irda_sir_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct pxa_irda *si = netdev_priv(dev);
	int iir, lsr, data;

	iir = STIIR;

	switch  (iir & 0x0F) {
	case 0x06: /* Receiver Line Status */
	  	lsr = STLSR;
		while (lsr & LSR_FIFOE) {
			data = STRBR;
			if (lsr & (LSR_OE | LSR_PE | LSR_FE | LSR_BI)) {
				printk(KERN_DEBUG "pxa_ir: sir receiving error\n");
				dev->stats.rx_errors++;
				if (lsr & LSR_FE)
					dev->stats.rx_frame_errors++;
				if (lsr & LSR_OE)
					dev->stats.rx_fifo_errors++;
			} else {
				dev->stats.rx_bytes++;
				async_unwrap_char(dev, &dev->stats,
						  &si->rx_buff, data);
			}
			lsr = STLSR;
		}
		si->last_oscr = OSCR;
		break;

	case 0x04: /* Received Data Available */
	  	   /* fall through */

	case 0x0C: /* Character Timeout Indication */
	  	do  {
		    dev->stats.rx_bytes++;
	            async_unwrap_char(dev, &dev->stats, &si->rx_buff, STRBR);
	  	} while (STLSR & LSR_DR);
		si->last_oscr = OSCR;
	  	break;

	case 0x02: /* Transmit FIFO Data Request */
	    	while ((si->tx_buff.len) && (STLSR & LSR_TDRQ)) {
	    		STTHR = *si->tx_buff.data++;
			si->tx_buff.len -= 1;
	    	}

		if (si->tx_buff.len == 0) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += si->tx_buff.data - si->tx_buff.head;

                        /* We need to ensure that the transmitter has finished. */
			while ((STLSR & LSR_TEMT) == 0)
				cpu_relax();
			si->last_oscr = OSCR;

			/*
		 	* Ok, we've finished transmitting.  Now enable
		 	* the receiver.  Sometimes we get a receive IRQ
		 	* immediately after a transmit...
		 	*/
			if (si->newspeed) {
				pxa_irda_set_speed(si, si->newspeed);
				si->newspeed = 0;
			} else {
				/* enable IR Receiver, disable IR Transmitter */
				STISR = IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6;
				/* enable STUART and receive interrupts */
				STIER = IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE;
			}
			/* I'm hungry! */
			netif_wake_queue(dev);
		}
		break;
	}

	return IRQ_HANDLED;
}
342
343/* FIR Receive DMA interrupt handler */
/*
 * FIR Receive DMA interrupt handler.
 *
 * The RX descriptor is programmed without DCMD_ENDIRQEN, so this appears
 * to fire only on DMA error conditions: stop the channel and log the
 * status.  Normal frame completion is handled via the FICP interrupt.
 */
static void pxa_irda_fir_dma_rx_irq(int channel, void *data)
{
	int dcsr = DCSR(channel);

	/* acknowledge status and stop the channel */
	DCSR(channel) = dcsr & ~DCSR_RUN;

	printk(KERN_DEBUG "pxa_ir: fir rx dma bus error %#x\n", dcsr);
}
352
353/* FIR Transmit DMA interrupt handler */
/*
 * FIR Transmit DMA interrupt handler.
 *
 * Fires when the TX descriptor completes (DCMD_ENDIRQEN) or errors.
 * Updates TX stats, waits for the FICP transmitter to drain, then either
 * applies a pending speed change or restarts the receiver (flushing any
 * stale bytes from the RX FIFO) and wakes the transmit queue.
 */
static void pxa_irda_fir_dma_tx_irq(int channel, void *data)
{
	struct net_device *dev = data;
	struct pxa_irda *si = netdev_priv(dev);
	int dcsr;

	dcsr = DCSR(channel);
	/* acknowledge status and stop the channel */
	DCSR(channel) = dcsr & ~DCSR_RUN;

	if (dcsr & DCSR_ENDINTR)  {
		/* normal end-of-descriptor completion */
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += si->dma_tx_buff_len;
	} else {
		dev->stats.tx_errors++;
	}

	/* wait for the transmitter-busy flag to clear */
	while (ICSR1 & ICSR1_TBY)
		cpu_relax();
	si->last_oscr = OSCR;

	/*
	 * HACK: It looks like the TBY bit is dropped too soon.
	 * Without this delay things break.
	 */
	udelay(120);

	if (si->newspeed) {
		pxa_irda_set_speed(si, si->newspeed);
		si->newspeed = 0;
	} else {
		int i = 64;

		/* restart RX: disable FICP, re-arm DMA, drain the RX FIFO */
		ICCR0 = 0;
		pxa_irda_fir_dma_rx_start(si);
		while ((ICSR1 & ICSR1_RNE) && i--)
			(void)ICDR;
		ICCR0 = ICCR0_ITR | ICCR0_RXE;

		if (i < 0)
			printk(KERN_ERR "pxa_ir: cannot clear Rx FIFO!\n");
	}
	netif_wake_queue(dev);
}
397
398/* EIF(Error in FIFO/End in Frame) handler for FIR */
/*
 * EIF(Error in FIFO/End in Frame) handler for FIR.
 *
 * Drains the residue left in the FICP RX FIFO after DMA stopped,
 * appending good bytes to the DMA buffer, and on end-of-frame builds an
 * skb from the assembled frame and hands it to the network stack.
 */
static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, int icsr0)
{
	unsigned int len, stat, data;

	/* Get the current data position: bytes DMA already stored. */
	len = DTADR(si->rxdma) - si->dma_rx_buff_phy;

	do {
		/* Read Status, and then Data; rmb() keeps that order. */
		stat = ICSR1;
		rmb();
		data = ICDR;

		if (stat & (ICSR1_CRE | ICSR1_ROR)) {
			dev->stats.rx_errors++;
			if (stat & ICSR1_CRE) {
				printk(KERN_DEBUG "pxa_ir: fir receive CRC error\n");
				dev->stats.rx_crc_errors++;
			}
			if (stat & ICSR1_ROR) {
				printk(KERN_DEBUG "pxa_ir: fir receive overrun\n");
				dev->stats.rx_over_errors++;
			}
		} else	{
			si->dma_rx_buff[len++] = data;
		}
		/* If we hit the end of frame, there's no point in continuing. */
		if (stat & ICSR1_EOF)
			break;
	} while (ICSR0 & ICSR0_EIF);

	if (stat & ICSR1_EOF) {
		/* end of frame. */
		struct sk_buff *skb;

		if (icsr0 & ICSR0_FRE) {
			printk(KERN_ERR "pxa_ir: dropping erroneous frame\n");
			dev->stats.rx_dropped++;
			return;
		}

		skb = alloc_skb(len+1,GFP_ATOMIC);
		if (!skb)  {
			printk(KERN_ERR "pxa_ir: fir out of memory for receive skb\n");
			dev->stats.rx_dropped++;
			return;
		}

		/* Align IP header to 20 bytes  */
		skb_reserve(skb, 1);
		skb_copy_to_linear_data(skb, si->dma_rx_buff, len);
		skb_put(skb, len);

		/* Feed it to IrLAP  */
		skb->dev = dev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
	}
}
462
463/* FIR interrupt handler */
/*
 * FIR interrupt handler (FICP).
 *
 * Stops RX DMA, records and acknowledges error conditions, processes
 * any end-of-frame / FIFO residue via pxa_irda_fir_irq_eif(), then
 * re-arms RX DMA and flushes stale bytes out of the RX FIFO.
 */
static irqreturn_t pxa_irda_fir_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct pxa_irda *si = netdev_priv(dev);
	int icsr0, i = 64;

	/* stop RX DMA */
	DCSR(si->rxdma) &= ~DCSR_RUN;
	si->last_oscr = OSCR;
	icsr0 = ICSR0;

	if (icsr0 & (ICSR0_FRE | ICSR0_RAB)) {
		if (icsr0 & ICSR0_FRE) {
		        printk(KERN_DEBUG "pxa_ir: fir receive frame error\n");
			dev->stats.rx_frame_errors++;
		} else {
			printk(KERN_DEBUG "pxa_ir: fir receive abort\n");
			dev->stats.rx_errors++;
		}
		/* write-one-to-clear the error bits we saw */
		ICSR0 = icsr0 & (ICSR0_FRE | ICSR0_RAB);
	}

	if (icsr0 & ICSR0_EIF) {
		/* An error in FIFO occurred, or there is a end of frame */
		pxa_irda_fir_irq_eif(si, dev, icsr0);
	}

	/* restart the receiver: disable FICP, re-arm DMA, drain RX FIFO */
	ICCR0 = 0;
	pxa_irda_fir_dma_rx_start(si);
	while ((ICSR1 & ICSR1_RNE) && i--)
		(void)ICDR;
	ICCR0 = ICCR0_ITR | ICCR0_RXE;

	if (i < 0)
		printk(KERN_ERR "pxa_ir: cannot clear Rx FIFO!\n");

	return IRQ_HANDLED;
}
502
503/* hard_xmit interface of irda device */
/*
 * hard_xmit interface of irda device.
 *
 * Queues one frame for transmission.  In SIR mode the frame is
 * async-wrapped into tx_buff and sent byte-by-byte from the STUART TX
 * interrupt; in FIR mode it is copied to the DMA buffer and pushed out
 * by the TX DMA channel.  Always consumes the skb and returns
 * NETDEV_TX_OK.
 */
static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pxa_irda *si = netdev_priv(dev);
	int speed = irda_get_next_speed(skb);

	/*
	 * Does this packet contain a request to change the interface
	 * speed?  If so, remember it until we complete the transmission
	 * of this frame.
	 */
	if (speed != si->speed && speed != -1)
		si->newspeed = speed;

	/*
	 * If this is an empty frame, we can bypass a lot.
	 */
	if (skb->len == 0) {
		if (si->newspeed) {
			si->newspeed = 0;
			pxa_irda_set_speed(si, speed);
		}
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* stopped here; re-woken from the TX-complete paths */
	netif_stop_queue(dev);

	if (!IS_FIR(si)) {
		/* SIR: async-wrap the frame; the IRQ handler drains it */
		si->tx_buff.data = si->tx_buff.head;
		si->tx_buff.len  = async_wrap_skb(skb, si->tx_buff.data, si->tx_buff.truesize);

		/* Disable STUART interrupts and switch to transmit mode. */
		STIER = 0;
		STISR = IrSR_IR_TRANSMIT_ON | IrSR_XMODE_PULSE_1_6;

		/* enable STUART and transmit interrupts */
		STIER = IER_UUE | IER_TIE;
	} else {
		unsigned long mtt = irda_get_mtt(skb);

		si->dma_tx_buff_len = skb->len;
		skb_copy_from_linear_data(skb, si->dma_tx_buff, skb->len);

		/*
		 * Honour the minimum turnaround time by busy-waiting on
		 * the OS timer.  NOTE(review): the /4 presumably converts
		 * OSCR ticks to microseconds — confirm against the OSCR
		 * clock rate.
		 */
		if (mtt)
			while ((unsigned)(OSCR - si->last_oscr)/4 < mtt)
				cpu_relax();

		/* stop RX DMA,  disable FICP */
		DCSR(si->rxdma) &= ~DCSR_RUN;
		ICCR0 = 0;

		pxa_irda_fir_dma_tx_start(si);
		ICCR0 = ICCR0_ITR | ICCR0_TXE;
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
562
563static int pxa_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
564{
565	struct if_irda_req *rq = (struct if_irda_req *)ifreq;
566	struct pxa_irda *si = netdev_priv(dev);
567	int ret;
568
569	switch (cmd) {
570	case SIOCSBANDWIDTH:
571		ret = -EPERM;
572		if (capable(CAP_NET_ADMIN)) {
573			/*
574			 * We are unable to set the speed if the
575			 * device is not running.
576			 */
577			if (netif_running(dev)) {
578				ret = pxa_irda_set_speed(si,
579						rq->ifr_baudrate);
580			} else {
581				printk(KERN_INFO "pxa_ir: SIOCSBANDWIDTH: !netif_running\n");
582				ret = 0;
583			}
584		}
585		break;
586
587	case SIOCSMEDIABUSY:
588		ret = -EPERM;
589		if (capable(CAP_NET_ADMIN)) {
590			irda_device_set_media_busy(dev, TRUE);
591			ret = 0;
592		}
593		break;
594
595	case SIOCGRECEIVING:
596		ret = 0;
597		rq->ifr_receiving = IS_FIR(si) ? 0
598					: si->rx_buff.state != OUTSIDE_FRAME;
599		break;
600
601	default:
602		ret = -EOPNOTSUPP;
603		break;
604	}
605
606	return ret;
607}
608
/*
 * Bring the STUART and FICP into a known state and route the DMA
 * request lines, then force a reinitialization into SIR mode at
 * 9600 baud (the IrDA default discovery rate).
 */
static void pxa_irda_startup(struct pxa_irda *si)
{
	/* Disable STUART interrupts */
	STIER = 0;
	/* enable STUART interrupt to the processor */
	STMCR = MCR_OUT2;
	/* configure SIR frame format: StartBit - Data 7 ... Data 0 - Stop Bit */
	STLCR = LCR_WLS0 | LCR_WLS1;
	/* enable FIFO, we use FIFO to improve performance */
	STFCR = FCR_TRFIFOE | FCR_ITL_32;

	/* disable FICP */
	ICCR0 = 0;
	/* configure FICP ICCR2 */
	ICCR2 = ICCR2_TXP | ICCR2_TRIG_32;

	/* configure DMAC: map FICP RX/TX requests onto our channels */
	DRCMR(17) = si->rxdma | DRCMR_MAPVLD;
	DRCMR(18) = si->txdma | DRCMR_MAPVLD;

	/* force SIR reinitialization: pretend we are in FIR so that
	 * pxa_irda_set_speed() performs the full FIR->SIR switch */
	si->speed = 4000000;
	pxa_irda_set_speed(si, 9600);

	printk(KERN_DEBUG "pxa_ir: irda startup\n");
}
635
/*
 * Quiesce the hardware: disable the STUART and FICP, stop both DMA
 * channels, drop the clocks, unmap the DMA request lines and power
 * down the board transceiver.
 */
static void pxa_irda_shutdown(struct pxa_irda *si)
{
	unsigned long flags;

	local_irq_save(flags);

	/* disable STUART and interrupt */
	STIER = 0;
	/* disable STUART SIR mode */
	STISR = 0;

	/* disable DMA */
	DCSR(si->txdma) &= ~DCSR_RUN;
	DCSR(si->rxdma) &= ~DCSR_RUN;
	/* disable FICP */
	ICCR0 = 0;

	/* disable the STUART or FICP clocks */
	pxa_irda_disable_clk(si);

	/* unmap the FICP DMA request lines */
	DRCMR(17) = 0;
	DRCMR(18) = 0;

	local_irq_restore(flags);

	/* power off board transceiver */
	pxa_irda_set_mode(si, IR_OFF);

	printk(KERN_DEBUG "pxa_ir: irda shutdown\n");
}
666
/*
 * ndo_open: acquire IRQs, DMA channels and coherent DMA buffers, start
 * the hardware, open an IrLAP instance and enable the queue.  On any
 * failure the goto ladder unwinds exactly the resources acquired so far.
 */
static int pxa_irda_start(struct net_device *dev)
{
	struct pxa_irda *si = netdev_priv(dev);
	int err;

	si->speed = 9600;

	err = request_irq(IRQ_STUART, pxa_irda_sir_irq, 0, dev->name, dev);
	if (err)
		goto err_irq1;

	err = request_irq(IRQ_ICP, pxa_irda_fir_irq, 0, dev->name, dev);
	if (err)
		goto err_irq2;

	/*
	 * The interrupt must remain disabled for now.
	 */
	disable_irq(IRQ_STUART);
	disable_irq(IRQ_ICP);

	err = -EBUSY;
	si->rxdma = pxa_request_dma("FICP_RX",DMA_PRIO_LOW, pxa_irda_fir_dma_rx_irq, dev);
	if (si->rxdma < 0)
		goto err_rx_dma;

	si->txdma = pxa_request_dma("FICP_TX",DMA_PRIO_LOW, pxa_irda_fir_dma_tx_irq, dev);
	if (si->txdma < 0)
		goto err_tx_dma;

	err = -ENOMEM;
	si->dma_rx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
					     &si->dma_rx_buff_phy, GFP_KERNEL );
	if (!si->dma_rx_buff)
		goto err_dma_rx_buff;

	si->dma_tx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
					     &si->dma_tx_buff_phy, GFP_KERNEL );
	if (!si->dma_tx_buff)
		goto err_dma_tx_buff;

	/* Setup the serial port for the initial speed. */
	pxa_irda_startup(si);

	/*
	 * Open a new IrLAP layer instance.
	 */
	si->irlap = irlap_open(dev, &si->qos, "pxa");
	err = -ENOMEM;
	if (!si->irlap)
		goto err_irlap;

	/*
	 * Now enable the interrupt and start the queue
	 */
	enable_irq(IRQ_STUART);
	enable_irq(IRQ_ICP);
	netif_start_queue(dev);

	printk(KERN_DEBUG "pxa_ir: irda driver opened\n");

	return 0;

err_irlap:
	pxa_irda_shutdown(si);
	dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy);
err_dma_tx_buff:
	dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy);
err_dma_rx_buff:
	pxa_free_dma(si->txdma);
err_tx_dma:
	pxa_free_dma(si->rxdma);
err_rx_dma:
	free_irq(IRQ_ICP, dev);
err_irq2:
	free_irq(IRQ_STUART, dev);
err_irq1:

	return err;
}
747
748static int pxa_irda_stop(struct net_device *dev)
749{
750	struct pxa_irda *si = netdev_priv(dev);
751
752	netif_stop_queue(dev);
753
754	pxa_irda_shutdown(si);
755
756	/* Stop IrLAP */
757	if (si->irlap) {
758		irlap_close(si->irlap);
759		si->irlap = NULL;
760	}
761
762	free_irq(IRQ_STUART, dev);
763	free_irq(IRQ_ICP, dev);
764
765	pxa_free_dma(si->rxdma);
766	pxa_free_dma(si->txdma);
767
768	if (si->dma_rx_buff)
769		dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy);
770	if (si->dma_tx_buff)
771		dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy);
772
773	printk(KERN_DEBUG "pxa_ir: irda driver closed\n");
774	return 0;
775}
776
777static int pxa_irda_suspend(struct platform_device *_dev, pm_message_t state)
778{
779	struct net_device *dev = platform_get_drvdata(_dev);
780	struct pxa_irda *si;
781
782	if (dev && netif_running(dev)) {
783		si = netdev_priv(dev);
784		netif_device_detach(dev);
785		pxa_irda_shutdown(si);
786	}
787
788	return 0;
789}
790
/*
 * Platform resume hook: if the interface was up, reinitialize the
 * hardware, reattach the device and restart the transmit queue.
 */
static int pxa_irda_resume(struct platform_device *_dev)
{
	struct net_device *dev = platform_get_drvdata(_dev);

	if (dev && netif_running(dev)) {
		struct pxa_irda *si = netdev_priv(dev);

		pxa_irda_startup(si);
		netif_device_attach(dev);
		netif_wake_queue(dev);
	}

	return 0;
}
805
806
807static int pxa_irda_init_iobuf(iobuff_t *io, int size)
808{
809	io->head = kmalloc(size, GFP_KERNEL | GFP_DMA);
810	if (io->head != NULL) {
811		io->truesize = size;
812		io->in_frame = FALSE;
813		io->state    = OUTSIDE_FRAME;
814		io->data     = io->head;
815	}
816	return io->head ? 0 : -ENOMEM;
817}
818
/* net_device operations for the IrDA interface. */
static const struct net_device_ops pxa_irda_netdev_ops = {
	.ndo_open		= pxa_irda_start,
	.ndo_stop		= pxa_irda_stop,
	.ndo_start_xmit		= pxa_irda_hard_xmit,
	.ndo_do_ioctl		= pxa_irda_ioctl,
};
825
/*
 * Platform probe: claim the STUART and FICP register regions, allocate
 * the net_device, acquire clocks, SIR iobufs and the optional power-down
 * GPIO, run the board startup hook, advertise QoS capabilities and
 * register the netdev.  Note the error labels live inside the final
 * "if (err)" block, so the unwind only runs on failure.
 */
static int pxa_irda_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct pxa_irda *si;
	unsigned int baudrate_mask;
	int err;

	if (!pdev->dev.platform_data)
		return -ENODEV;

	err = request_mem_region(__PREG(STUART), 0x24, "IrDA") ? 0 : -EBUSY;
	if (err)
		goto err_mem_1;

	err = request_mem_region(__PREG(FICP), 0x1c, "IrDA") ? 0 : -EBUSY;
	if (err)
		goto err_mem_2;

	dev = alloc_irdadev(sizeof(struct pxa_irda));
	if (!dev)
		goto err_mem_3;

	SET_NETDEV_DEV(dev, &pdev->dev);
	si = netdev_priv(dev);
	si->dev = &pdev->dev;
	si->pdata = pdev->dev.platform_data;

	si->sir_clk = clk_get(&pdev->dev, "UARTCLK");
	si->fir_clk = clk_get(&pdev->dev, "FICPCLK");
	if (IS_ERR(si->sir_clk) || IS_ERR(si->fir_clk)) {
		err = PTR_ERR(IS_ERR(si->sir_clk) ? si->sir_clk : si->fir_clk);
		goto err_mem_4;
	}

	/*
	 * Initialise the SIR buffers
	 */
	err = pxa_irda_init_iobuf(&si->rx_buff, 14384);
	if (err)
		goto err_mem_4;
	err = pxa_irda_init_iobuf(&si->tx_buff, 4000);
	if (err)
		goto err_mem_5;

	if (gpio_is_valid(si->pdata->gpio_pwdown)) {
		err = gpio_request(si->pdata->gpio_pwdown, "IrDA switch");
		if (err)
			goto err_startup;
		/* initial level = powered on (inverted boards use low) */
		err = gpio_direction_output(si->pdata->gpio_pwdown,
					!si->pdata->gpio_pwdown_inverted);
		if (err) {
			gpio_free(si->pdata->gpio_pwdown);
			goto err_startup;
		}
	}

	if (si->pdata->startup) {
		err = si->pdata->startup(si->dev);
		if (err)
			goto err_startup;
	}

	if (gpio_is_valid(si->pdata->gpio_pwdown) && si->pdata->startup)
		dev_warn(si->dev, "gpio_pwdown and startup() both defined!\n");

	dev->netdev_ops = &pxa_irda_netdev_ops;

	irda_init_max_qos_capabilies(&si->qos);

	/* build the baud-rate mask from the board's declared capabilities */
	baudrate_mask = 0;
	if (si->pdata->transceiver_cap & IR_SIRMODE)
		baudrate_mask |= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
	if (si->pdata->transceiver_cap & IR_FIRMODE)
		baudrate_mask |= IR_4000000 << 8;

	si->qos.baud_rate.bits &= baudrate_mask;
	si->qos.min_turn_time.bits = 7;  /* 1ms or more */

	irda_qos_bits_to_value(&si->qos);

	err = register_netdev(dev);

	if (err == 0)
		dev_set_drvdata(&pdev->dev, dev);

	if (err) {
		/* failure: unwind in reverse order of acquisition */
		if (si->pdata->shutdown)
			si->pdata->shutdown(si->dev);
err_startup:
		kfree(si->tx_buff.head);
err_mem_5:
		kfree(si->rx_buff.head);
err_mem_4:
		if (si->sir_clk && !IS_ERR(si->sir_clk))
			clk_put(si->sir_clk);
		if (si->fir_clk && !IS_ERR(si->fir_clk))
			clk_put(si->fir_clk);
		free_netdev(dev);
err_mem_3:
		release_mem_region(__PREG(FICP), 0x1c);
err_mem_2:
		release_mem_region(__PREG(STUART), 0x24);
	}
err_mem_1:
	return err;
}
932
933static int pxa_irda_remove(struct platform_device *_dev)
934{
935	struct net_device *dev = platform_get_drvdata(_dev);
936
937	if (dev) {
938		struct pxa_irda *si = netdev_priv(dev);
939		unregister_netdev(dev);
940		if (gpio_is_valid(si->pdata->gpio_pwdown))
941			gpio_free(si->pdata->gpio_pwdown);
942		if (si->pdata->shutdown)
943			si->pdata->shutdown(si->dev);
944		kfree(si->tx_buff.head);
945		kfree(si->rx_buff.head);
946		clk_put(si->fir_clk);
947		clk_put(si->sir_clk);
948		free_netdev(dev);
949	}
950
951	release_mem_region(__PREG(STUART), 0x24);
952	release_mem_region(__PREG(FICP), 0x1c);
953
954	return 0;
955}
956
/* Platform driver glue; matches devices named "pxa2xx-ir". */
static struct platform_driver pxa_ir_driver = {
	.driver         = {
		.name   = "pxa2xx-ir",
		.owner	= THIS_MODULE,
	},
	.probe		= pxa_irda_probe,
	.remove		= pxa_irda_remove,
	.suspend	= pxa_irda_suspend,
	.resume		= pxa_irda_resume,
};
967
/* Module entry point: register the platform driver. */
static int __init pxa_irda_init(void)
{
	return platform_driver_register(&pxa_ir_driver);
}
972
/* Module exit point: unregister the platform driver. */
static void __exit pxa_irda_exit(void)
{
	platform_driver_unregister(&pxa_ir_driver);
}
977
978module_init(pxa_irda_init);
979module_exit(pxa_irda_exit);
980
981MODULE_LICENSE("GPL");
982MODULE_ALIAS("platform:pxa2xx-ir");
983