/* interrupt handling
    Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004  Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007  Hans Verkuil <hverkuil@xs4all.nl>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include "ivtv-driver.h"
#include "ivtv-queue.h"
#include "ivtv-udma.h"
#include "ivtv-irq.h"
#include "ivtv-mailbox.h"
#include "ivtv-vbi.h"
#include "ivtv-yuv.h"
#include <media/v4l2-event.h>

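/* The DMA_MAGIC_COOKIE is written to the start of the transfer area on
   the card just before a DMA is scheduled. When the transfer completes,
   dma_post() looks for it near the start of the first buffer to locate
   the actual start of the data, then restores the word it overwrote. */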
#define DMA_MAGIC_COOKIE 0x000001fe

static void ivtv_dma_dec_start(struct ivtv_stream *s);

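/* Maps the stream type reported by the encoder firmware (data[0] of the
   ENC START CAP mailbox) onto the driver's stream index. VBI is absent
   here since it is signalled through IVTV_IRQ_ENC_VBI_CAP instead. */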
static const int ivtv_stream_map[] = {
	IVTV_ENC_STREAM_TYPE_MPG,
	IVTV_ENC_STREAM_TYPE_YUV,
	IVTV_ENC_STREAM_TYPE_PCM,
	IVTV_ENC_STREAM_TYPE_VBI,
};


static void ivtv_pio_work_handler(struct ivtv *itv)
{
	struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
	struct ivtv_buffer *buf;
	int i = 0;

	IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
			s->vdev == NULL || !ivtv_use_pio(s)) {
		itv->cur_pio_stream = -1;
		/* trigger PIO complete user interrupt */
		write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
		return;
	}
	IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
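	/* Copy each segment of the scatter-gather list that
	   ivtv_dma_enc_start() prepared in sg_processing[] out of card
	   memory and into the host buffers. */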
	list_for_each_entry(buf, &s->q_dma.list, list) {
		u32 size = s->sg_processing[i].size & 0x3ffff;

		/* Copy the data from the card to the buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
			memcpy_fromio(buf->buf, itv->dec_mem + s->sg_processing[i].src - IVTV_DECODER_OFFSET, size);
		}
		else {
			memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size);
		}
		i++;
		if (i == s->sg_processing_size)
			break;
	}
	write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
}

void ivtv_irq_work_handler(struct kthread_work *work)
{
	struct ivtv *itv = container_of(work, struct ivtv, irq_work);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
		ivtv_pio_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
		ivtv_vbi_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
		ivtv_yuv_work_handler(itv);
}
/* Determine the required DMA size, set up enough buffers in the predma queue,
   and actually copy the data from the card to the buffers if a PIO transfer
   is required for this stream.
 */
static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf;
	u32 bytes_needed = 0;
	u32 offset, size;
	u32 UVoffset = 0, UVsize = 0;
	int skip_bufs = s->q_predma.buffers;
	int idx = s->sg_pending_size;
	int rc;

	/* sanity checks */
	if (s->vdev == NULL) {
		IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
		return -1;
	}
	if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
		IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
		return -1;
	}

	/* determine offset, size and PTS for the various streams */
	switch (s->type) {
		case IVTV_ENC_STREAM_TYPE_MPG:
			offset = data[1];
			size = data[2];
			s->pending_pts = 0;
			break;

		case IVTV_ENC_STREAM_TYPE_YUV:
			offset = data[1];
			size = data[2];
			UVoffset = data[3];
			UVsize = data[4];
			s->pending_pts = ((u64) data[5] << 32) | data[6];
			break;

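		/* The first 12 bytes of the PCM block appear to hold the PTS
		   (read below from just before the adjusted offset); skip
		   them for the data proper. */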
		case IVTV_ENC_STREAM_TYPE_PCM:
			offset = data[1] + 12;
			size = data[2] - 12;
			s->pending_pts = read_dec(offset - 8) |
				((u64)(read_dec(offset - 12)) << 32);
			if (itv->has_cx23415)
				offset += IVTV_DECODER_OFFSET;
			break;

		case IVTV_ENC_STREAM_TYPE_VBI:
			size = itv->vbi.enc_size * itv->vbi.fpi;
			offset = read_enc(itv->vbi.enc_start - 4) + 12;
			if (offset == 12) {
				IVTV_DEBUG_INFO("VBI offset == 0\n");
				return -1;
			}
			s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
			break;

		case IVTV_DEC_STREAM_TYPE_VBI:
			size = read_dec(itv->vbi.dec_start + 4) + 8;
			offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
			s->pending_pts = 0;
			offset += IVTV_DECODER_OFFSET;
			break;
		default:
			/* shouldn't happen */
			return -1;
	}

	/* if this is the start of the DMA then fill in the magic cookie */
	if (s->sg_pending_size == 0 && ivtv_use_dma(s)) {
		if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
		    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
			s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET);
			write_dec_sync(DMA_MAGIC_COOKIE, offset - IVTV_DECODER_OFFSET);
		}
		else {
			s->pending_backup = read_enc(offset);
			write_enc_sync(DMA_MAGIC_COOKIE, offset);
		}
		s->pending_offset = offset;
	}

	bytes_needed = size;
	if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
		/* The size for the Y samples needs to be rounded upwards to a
		   multiple of the buf_size. The UV samples then start in the
		   next buffer. */
		bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
		bytes_needed += UVsize;
	}

	IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
		ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);

	rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
	if (rc < 0) { /* Insufficient buffers */
		IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
				bytes_needed, s->name);
		return -1;
	}
	if (rc && !s->buffers_stolen && test_bit(IVTV_F_S_APPL_IO, &s->s_flags)) {
		IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
		IVTV_WARN("Cause: the application is not reading fast enough.\n");
	}
	s->buffers_stolen = rc;

	/* got the buffers, now fill in sg_pending */
	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
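	/* Clear the start of the first buffer, presumably so that stale
	   data there cannot be mistaken for the DMA_MAGIC_COOKIE that
	   dma_post() scans for. */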
	memset(buf->buf, 0, 128);
	list_for_each_entry(buf, &s->q_predma.list, list) {
		if (skip_bufs-- > 0)
			continue;
		s->sg_pending[idx].dst = buf->dma_handle;
		s->sg_pending[idx].src = offset;
		s->sg_pending[idx].size = s->buf_size;
		buf->bytesused = min(size, s->buf_size);
		buf->dma_xfer_cnt = s->dma_xfer_cnt;

		s->q_predma.bytesused += buf->bytesused;
		size -= buf->bytesused;
		offset += s->buf_size;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);

		if (size == 0) {	/* YUV */
			/* process the UV section */
			offset = UVoffset;
			size = UVsize;
		}
		idx++;
	}
	s->sg_pending_size = idx;
	return 0;
}

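/* Handle a completed PIO or DMA transfer: sync the buffers for the CPU,
   locate the DMA_MAGIC_COOKIE to find the true start of the data, restore
   the word the cookie overwrote, hand decoder VBI data to the VBI parser
   and move the filled buffers to the full queue. */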
static void dma_post(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf = NULL;
	struct list_head *p;
	u32 offset;
	__le32 *u32buf;
	int x = 0;

	IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
			s->name, s->dma_offset);
	list_for_each(p, &s->q_dma.list) {
		buf = list_entry(p, struct ivtv_buffer, list);
		u32buf = (__le32 *)buf->buf;

		/* Sync Buffer */
		ivtv_buf_sync_for_cpu(s, buf);

		if (x == 0 && ivtv_use_dma(s)) {
			offset = s->dma_last_offset;
			if (le32_to_cpu(u32buf[offset / 4]) != DMA_MAGIC_COOKIE) {
				for (offset = 0; offset < 64; offset++) {
					if (le32_to_cpu(u32buf[offset]) == DMA_MAGIC_COOKIE)
						break;
				}
				offset *= 4;
				if (offset == 256) {
					IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
					offset = s->dma_last_offset;
				}
				if (s->dma_last_offset != offset)
					IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
				s->dma_last_offset = offset;
			}
			if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
						s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
				write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
			}
			else {
				write_enc_sync(0, s->dma_offset);
			}
			if (offset) {
				buf->bytesused -= offset;
				memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset);
			}
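			/* restore the word that the magic cookie overwrote */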
			*u32buf = cpu_to_le32(s->dma_backup);
		}
		x++;
		/* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
		if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
		    s->type == IVTV_ENC_STREAM_TYPE_VBI)
			buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
	}
	if (buf)
		buf->bytesused += s->dma_last_offset;
	if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
		list_for_each_entry(buf, &s->q_dma.list, list) {
			/* Parse and Groom VBI Data */
			s->q_dma.bytesused -= buf->bytesused;
			ivtv_process_vbi_data(itv, buf, 0, s->type);
			s->q_dma.bytesused += buf->bytesused;
		}
		if (s->fh == NULL) {
			ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
			return;
		}
	}
	ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
	if (s->fh)
		wake_up(&s->waitq);
}

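/* Build the scatter-gather list for a transfer from host memory to the
   decoder (MPG or YUV) and start the DMA, or flag it as pending if another
   DMA is already in progress. For YUV the Y plane and the UV plane are
   written to separate areas of the frame buffer on the card. */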
void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
{
	struct ivtv *itv = s->itv;
	struct yuv_playback_info *yi = &itv->yuv_info;
	u8 frame = yi->draw_frame;
	struct yuv_frame_info *f = &yi->new_frame_info[frame];
	struct ivtv_buffer *buf;
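	/* Y plane size: 720 bytes per line, with the height rounded up to
	   a multiple of 32 lines */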
	u32 y_size = 720 * ((f->src_h + 31) & ~31);
	u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
	int y_done = 0;
	int bytes_written = 0;
	unsigned long flags = 0;
	int idx = 0;

	IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);

	/* Insert buffer block for YUV if needed */
	if (s->type == IVTV_DEC_STREAM_TYPE_YUV && f->offset_y) {
		if (yi->blanking_dmaptr) {
			s->sg_pending[idx].src = yi->blanking_dmaptr;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = 720 * 16;
		}
		offset += 720 * 16;
		idx++;
	}

	list_for_each_entry(buf, &s->q_predma.list, list) {
		/* YUV UV Offset from Y Buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done &&
				(bytes_written + buf->bytesused) >= y_size) {
			s->sg_pending[idx].src = buf->dma_handle;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = y_size - bytes_written;
			offset = uv_offset;
			if (s->sg_pending[idx].size != buf->bytesused) {
				idx++;
				s->sg_pending[idx].src =
				  buf->dma_handle + s->sg_pending[idx - 1].size;
				s->sg_pending[idx].dst = offset;
				s->sg_pending[idx].size =
				   buf->bytesused - s->sg_pending[idx - 1].size;
				offset += s->sg_pending[idx].size;
			}
			y_done = 1;
		} else {
			s->sg_pending[idx].src = buf->dma_handle;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = buf->bytesused;
			offset += buf->bytesused;
		}
		bytes_written += buf->bytesused;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);
		idx++;
	}
	s->sg_pending_size = idx;

	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	if (lock)
		spin_lock_irqsave(&itv->dma_reg_lock, flags);
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		ivtv_dma_dec_start(s);
	}
	else {
		set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
	if (lock)
		spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}

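/* Program the next scatter-gather element into the card's DMA engine.
   Bit 31 of the size word appears to tell the firmware that the element
   is valid; the 300 ms timer catches transfers that never complete
   (see ivtv_unfinished_dma()). */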
static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
	add_timer(&itv->dma_timer);
}

static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
	add_timer(&itv->dma_timer);
}

/* start the encoder DMA */
static void ivtv_dma_enc_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
	int i;

	IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);

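	/* When using real DMA, enlarge the last segment by 256 bytes. The
	   reason is not documented; it appears to be related to the
	   DMA_MAGIC_COOKIE handling in dma_post(). */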
	if (ivtv_use_dma(s))
		s->sg_pending[s->sg_pending_size - 1].size += 256;

	/* If this is an MPEG stream, and VBI data is also pending, then append the
	   VBI DMA to the MPEG DMA and transfer both sets of data at once.

	   VBI DMA is a second-class citizen compared to MPEG and mixing them together
	   will confuse the firmware (the end of a VBI DMA is seen as the end of an
	   MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
	   sure we only use the MPEG DMA to transfer the VBI DMA if both are in
	   use. This way no conflicts occur. */
	clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size &&
			s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) {
		ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
		if (ivtv_use_dma(s_vbi))
			s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256;
		for (i = 0; i < s_vbi->sg_pending_size; i++) {
			s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i];
		}
		s_vbi->dma_offset = s_vbi->pending_offset;
		s_vbi->sg_pending_size = 0;
		s_vbi->dma_xfer_cnt++;
		set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
		IVTV_DEBUG_HI_DMA("include DMA for %s\n", s_vbi->name);
	}

	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;
	s->dma_offset = s->pending_offset;
	s->dma_backup = s->pending_backup;
	s->dma_pts = s->pending_pts;

	if (ivtv_use_pio(s)) {
		set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
		set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		set_bit(IVTV_F_I_PIO, &itv->i_flags);
		itv->cur_pio_stream = s->type;
	}
	else {
		itv->dma_retries = 0;
		ivtv_dma_enc_start_xfer(s);
		set_bit(IVTV_F_I_DMA, &itv->i_flags);
		itv->cur_dma_stream = s->type;
	}
}

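/* start the decoder DMA */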
static void ivtv_dma_dec_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;

	IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
	itv->dma_retries = 0;
	ivtv_dma_dec_start_xfer(s);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = s->type;
}

static void ivtv_irq_dma_read(struct ivtv *itv)
{
	struct ivtv_stream *s = NULL;
	struct ivtv_buffer *buf;
	int hw_stream_type = 0;

	IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");

	del_timer(&itv->dma_timer);

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0)
		return;

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		s = &itv->streams[itv->cur_dma_stream];
		ivtv_stream_sync_for_cpu(s);

		if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
			IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
					read_reg(IVTV_REG_DMASTATUS),
					s->sg_processed, s->sg_processing_size, itv->dma_retries);
			write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
			if (itv->dma_retries == 3) {
				/* Too many retries, give up on this frame */
				itv->dma_retries = 0;
				s->sg_processed = s->sg_processing_size;
			}
			else {
				/* Retry, starting with the first xfer segment.
				   Just retrying the current segment is not sufficient. */
				s->sg_processed = 0;
				itv->dma_retries++;
			}
		}
		if (s->sg_processed < s->sg_processing_size) {
			/* DMA next buffer */
			ivtv_dma_dec_start_xfer(s);
			return;
		}
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV)
			hw_stream_type = 2;
		IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);

		/* For some reason we must kick the firmware here, just as in
		   PIO mode. This appears to tell the firmware that we are
		   done and reports the size of the transfer so it can work
		   out what it needs to do next. We could probably do this
		   part ourselves, but we would then have to calculate the
		   transfer details ourselves instead of relying on interrupts.
		 */
		ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
				hw_stream_type);

		/* Free last DMA call */
		while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
			ivtv_buf_sync_for_cpu(s, buf);
			ivtv_enqueue(s, buf, &s->q_free);
		}
		wake_up(&s->waitq);
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
	IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream);

	del_timer(&itv->dma_timer);

	if (itv->cur_dma_stream < 0)
		return;

	s = &itv->streams[itv->cur_dma_stream];
	ivtv_stream_sync_for_cpu(s);

	if (data[0] & 0x18) {
		IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0],
			s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries);
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
		if (itv->dma_retries == 3) {
			/* Too many retries, give up on this frame */
			itv->dma_retries = 0;
			s->sg_processed = s->sg_processing_size;
		}
		else {
			/* Retry, starting with the first xfer segment.
			   Just retrying the current segment is not sufficient. */
			s->sg_processed = 0;
			itv->dma_retries++;
		}
	}
	if (s->sg_processed < s->sg_processing_size) {
		/* DMA next buffer */
		ivtv_dma_enc_start_xfer(s);
		return;
	}
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	dma_post(s);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	s->sg_processing_size = 0;
	s->sg_processed = 0;
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
{
	struct ivtv_stream *s;

	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
		itv->cur_pio_stream = -1;
		return;
	}
	s = &itv->streams[itv->cur_pio_stream];
	IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	itv->cur_pio_stream = -1;
	dma_post(s);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
	else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
	else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_dma_err(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	u32 status;

	del_timer(&itv->dma_timer);

	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
	status = read_reg(IVTV_REG_DMASTATUS);
	IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
				status, itv->cur_dma_stream);
	/*
	 * We do *not* write back to the IVTV_REG_DMASTATUS register to
	 * clear the error status if either the encoder write (0x02) or
	 * decoder read (0x01) bus master DMA operation does not indicate
	 * completion. We can race with the DMA engine, which may have
	 * transitioned to completed status *after* we read the register.
	 * Setting an IVTV_REG_DMASTATUS flag back to "busy" status after the
	 * DMA engine has completed will cause the DMA engine to stop working.
	 */
	status &= 0x3;
	if (status == 0x3)
		write_reg(status, IVTV_REG_DMASTATUS);

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
	    itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
		struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];

		if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
			/* retry */
			/*
			 * FIXME - handle cases of DMA error similar to
			 * encoder below, except conditioned on status & 0x1
			 */
			ivtv_dma_dec_start(s);
			return;
		} else {
			if ((status & 0x2) == 0) {
				/*
				 * CX2341x Bus Master DMA write is ongoing.
				 * Reset the timer and let it complete.
				 */
				itv->dma_timer.expires =
						jiffies + msecs_to_jiffies(600);
				add_timer(&itv->dma_timer);
				return;
			}

			if (itv->dma_retries < 3) {
				/*
				 * CX2341x Bus Master DMA write has ended.
				 * Retry the write, starting with the first
				 * xfer segment. Just retrying the current
				 * segment is not sufficient.
				 */
				s->sg_processed = 0;
				itv->dma_retries++;
				ivtv_dma_enc_start_xfer(s);
				return;
			}
			/* Too many retries, give up on this one */
		}

	}
	if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		ivtv_udma_start(itv);
		return;
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_start_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* Get DMA destination and size arguments from card */
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, 7, data);
	IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);
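	/* data[0] is the stream type (0 = MPG, 1 = YUV, 2 = PCM; see
	   ivtv_stream_map[]), data[1] the offset and data[2] the size.
	   VBI captures arrive through IVTV_IRQ_ENC_VBI_CAP instead. */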

	if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
		IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
				data[0], data[1], data[2]);
		return;
	}
	s = &itv->streams[ivtv_stream_map[data[0]]];
	if (!stream_enc_dma_append(s, data)) {
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}

static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
	s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];

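	/* Note that data[] is not filled in here: for VBI streams,
	   stream_enc_dma_append() does not use it and reads the offset and
	   size from the card's VBI registers instead. */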
	if (!stream_enc_dma_append(s, data))
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
}

static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];

	IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
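	/* As in ivtv_irq_enc_vbi_cap(), data[] is unused for the decoder
	   VBI stream; stream_enc_dma_append() derives the offset and size
	   from the VBI registers. */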
	if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
			!stream_enc_dma_append(s, data)) {
		set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
	}
}

static void ivtv_irq_dec_data_req(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* YUV or MPG */

	if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
		ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 2, data);
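		/* Full frame size: 1080 bytes per line (720 bytes of Y plus
		   an average of 360 bytes of UV), with the height rounded up
		   to a multiple of 32 lines. */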
		itv->dma_data_req_size =
				 1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31);
		itv->dma_data_req_offset = data[1];
		if (atomic_read(&itv->yuv_info.next_dma_frame) >= 0)
			ivtv_yuv_frame_complete(itv);
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
	}
	else {
		ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 3, data);
		itv->dma_data_req_size = min_t(u32, data[2], 0x10000);
		itv->dma_data_req_offset = data[1];
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
	}
	IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
		       itv->dma_data_req_offset, itv->dma_data_req_size);
	if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
		set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
	}
	else {
		if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags))
			ivtv_yuv_setup_stream_frame(itv);
		clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
		ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
		ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
	}
}

static void ivtv_irq_vsync(struct ivtv *itv)
{
	/* The vsync interrupt is unusual in that it won't clear until
	 * the end of the first line for the current field, at which
	 * point it clears itself. This can result in repeated vsync
	 * interrupts, or a missed vsync. Read some of the registers
	 * to determine the line being displayed and ensure we handle
	 * one vsync per frame.
	 */
	unsigned int frame = read_reg(IVTV_REG_DEC_LINE_FIELD) & 1;
	struct yuv_playback_info *yi = &itv->yuv_info;
	int last_dma_frame = atomic_read(&yi->next_dma_frame);
	struct yuv_frame_info *f = &yi->new_frame_info[last_dma_frame];

	if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");

	if (((frame ^ f->sync_field) == 0 &&
		((itv->last_vsync_field & 1) ^ f->sync_field)) ||
			(frame != (itv->last_vsync_field & 1) && !f->interlaced)) {
		int next_dma_frame = last_dma_frame;

		if (!(f->interlaced && f->delay && yi->fields_lapsed < 1)) {
			if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&yi->next_fill_frame)) {
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
				next_dma_frame = (next_dma_frame + 1) % IVTV_YUV_BUFFERS;
				atomic_set(&yi->next_dma_frame, next_dma_frame);
				yi->fields_lapsed = -1;
				yi->running = 1;
			}
		}
	}
	if (frame != (itv->last_vsync_field & 1)) {
		static const struct v4l2_event evtop = {
			.type = V4L2_EVENT_VSYNC,
			.u.vsync.field = V4L2_FIELD_TOP,
		};
		static const struct v4l2_event evbottom = {
			.type = V4L2_EVENT_VSYNC,
			.u.vsync.field = V4L2_FIELD_BOTTOM,
		};
		struct ivtv_stream *s = ivtv_get_output_stream(itv);

		itv->last_vsync_field += 1;
		if (frame == 0) {
			clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
			clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		else {
			set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
			set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
			wake_up(&itv->event_waitq);
			if (s)
				wake_up(&s->waitq);
		}
		if (s && s->vdev)
			v4l2_event_queue(s->vdev, frame ? &evtop : &evbottom);
		wake_up(&itv->vsync_waitq);

		/* Send VBI to saa7127 */
		if (frame && (itv->output_mode == OUT_PASSTHROUGH ||
			test_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags) ||
			test_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags) ||
			test_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags))) {
			set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
			set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		}

		/* Check if we need to update the yuv registers */
		if (yi->running && (yi->yuv_forced_update || f->update)) {
			if (!f->update) {
				last_dma_frame =
					(u8)(atomic_read(&yi->next_dma_frame) -
						 1) % IVTV_YUV_BUFFERS;
				f = &yi->new_frame_info[last_dma_frame];
			}

			if (f->src_w) {
				yi->update_frame = last_dma_frame;
				f->update = 0;
				yi->yuv_forced_update = 0;
				set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
				set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
			}
		}

		yi->fields_lapsed++;
	}
}

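/* All interrupts after which a pending stream DMA or PIO transfer may be
   started; see the round-robin loops near the end of ivtv_irq_handler(). */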
#define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | \
		      IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | \
		      IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ | \
		      IVTV_IRQ_DEC_VBI_RE_INSERT)

irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
{
	struct ivtv *itv = (struct ivtv *)dev_id;
	u32 combo;
	u32 stat;
	int i;
	u8 vsync_force = 0;

	spin_lock(&itv->dma_reg_lock);
	/* get contents of irq status register */
	stat = read_reg(IVTV_REG_IRQSTATUS);

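	/* A set bit in itv->irqmask disables that interrupt, so the
	   pending, enabled interrupts are ~irqmask & status. */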
	combo = ~itv->irqmask & stat;

	/* Clear out IRQ */
	if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);

	if (0 == combo) {
		/* The vsync interrupt is unusual and clears itself. If we
		 * took too long, we may have missed it. Do some checks.
		 */
		if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
			/* vsync is enabled, see if we're in a new field */
			if ((itv->last_vsync_field & 1) !=
			    (read_reg(IVTV_REG_DEC_LINE_FIELD) & 1)) {
				/* New field, looks like we missed it */
				IVTV_DEBUG_YUV("VSync interrupt missed %d\n",
				       read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16);
				vsync_force = 1;
			}
		}

		if (!vsync_force) {
			/* No Vsync expected, wasn't for us */
			spin_unlock(&itv->dma_reg_lock);
			return IRQ_NONE;
		}
	}

	/* Exclude interrupts noted below from the output, otherwise the log is flooded with
	   these messages */
	if (combo & ~0xff6d0400)
		IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);

	if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
		IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
	}

	if (combo & IVTV_IRQ_DMA_READ) {
		ivtv_irq_dma_read(itv);
	}

	if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
		ivtv_irq_enc_dma_complete(itv);
	}

	if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
		ivtv_irq_enc_pio_complete(itv);
	}

	if (combo & IVTV_IRQ_DMA_ERR) {
		ivtv_irq_dma_err(itv);
	}

	if (combo & IVTV_IRQ_ENC_START_CAP) {
		ivtv_irq_enc_start_cap(itv);
	}

	if (combo & IVTV_IRQ_ENC_VBI_CAP) {
		ivtv_irq_enc_vbi_cap(itv);
	}

	if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
		ivtv_irq_dec_vbi_reinsert(itv);
	}

	if (combo & IVTV_IRQ_ENC_EOS) {
		IVTV_DEBUG_IRQ("ENC EOS\n");
		set_bit(IVTV_F_I_EOS, &itv->i_flags);
		wake_up(&itv->eos_waitq);
	}

	if (combo & IVTV_IRQ_DEC_DATA_REQ) {
		ivtv_irq_dec_data_req(itv);
	}

	/* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
	if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
		ivtv_irq_vsync(itv);
	}

	if (combo & IVTV_IRQ_ENC_VIM_RST) {
		IVTV_DEBUG_IRQ("VIM RST\n");
		/*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
	}

	if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
		IVTV_DEBUG_INFO("Stereo mode changed\n");
	}

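	/* Start any pending stream DMA. The round-robin index makes sure
	   that no stream can starve the others when several have transfers
	   pending. */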
	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
				continue;
			if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_dec_start(s);
			else
				ivtv_dma_enc_start(s);
			break;
		}

		if (i == IVTV_MAX_STREAMS &&
		    test_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags))
			ivtv_udma_start(itv);
	}

	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
				continue;
			if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_enc_start(s);
			break;
		}
	}

	if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
		queue_kthread_work(&itv->irq_worker, &itv->irq_work);
	}

	spin_unlock(&itv->dma_reg_lock);

	/* If we've just handled a 'forced' vsync, it's safest to say it
	 * wasn't ours. Another device may have triggered it at just
	 * the right time.
	 */
	return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}

void ivtv_unfinished_dma(unsigned long arg)
{
	struct ivtv *itv = (struct ivtv *)arg;

	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
		return;
	IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);

	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}