cx18-mailbox.c revision 754f9969c323559a12bce1475f3c1e6574129856
/*
 *  cx18 mailbox functions
 *
 *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
 *  Copyright (C) 2008  Andy Walls <awalls@md.metrocast.net>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
 *  02111-1307  USA
 */

#include <stdarg.h>

#include "cx18-driver.h"
#include "cx18-io.h"
#include "cx18-scb.h"
#include "cx18-irq.h"
#include "cx18-mailbox.h"
#include "cx18-queue.h"
#include "cx18-streams.h"
#include "cx18-alsa-pcm.h" /* FIXME make configurable */

static const char *rpu_str[] = { "APU", "CPU", "EPU", "HPU" };

#define API_FAST (1 << 2) /* Short timeout */
#define API_SLOW (1 << 3) /* Additional 300ms timeout */

struct cx18_api_info {
	u32 cmd;
	u8 flags;		/* Flags, see above */
	u8 rpu;			/* Processing unit */
	const char *name;	/* The name of the command */
};

#define API_ENTRY(rpu, x, f) { (x), (f), (rpu), #x }

static const struct cx18_api_info api_info[] = {
	/* MPEG encoder API */
	API_ENTRY(CPU, CX18_CPU_SET_CHANNEL_TYPE,		0),
	API_ENTRY(CPU, CX18_EPU_DEBUG,				0),
	API_ENTRY(CPU, CX18_CREATE_TASK,			0),
	API_ENTRY(CPU, CX18_DESTROY_TASK,			0),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_START,			API_SLOW),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_STOP,			API_SLOW),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_PAUSE,			0),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_RESUME,			0),
	API_ENTRY(CPU, CX18_CPU_SET_CHANNEL_TYPE,		0),
	API_ENTRY(CPU, CX18_CPU_SET_STREAM_OUTPUT_TYPE,		0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_IN,			0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_RATE,			0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_RESOLUTION,		0),
	API_ENTRY(CPU, CX18_CPU_SET_FILTER_PARAM,		0),
	API_ENTRY(CPU, CX18_CPU_SET_SPATIAL_FILTER_TYPE,	0),
	API_ENTRY(CPU, CX18_CPU_SET_MEDIAN_CORING,		0),
	API_ENTRY(CPU, CX18_CPU_SET_INDEXTABLE,			0),
	API_ENTRY(CPU, CX18_CPU_SET_AUDIO_PARAMETERS,		0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_MUTE,			0),
	API_ENTRY(CPU, CX18_CPU_SET_AUDIO_MUTE,			0),
	API_ENTRY(CPU, CX18_CPU_SET_MISC_PARAMETERS,		0),
	API_ENTRY(CPU, CX18_CPU_SET_RAW_VBI_PARAM,		API_SLOW),
	API_ENTRY(CPU, CX18_CPU_SET_CAPTURE_LINE_NO,		0),
	API_ENTRY(CPU, CX18_CPU_SET_COPYRIGHT,			0),
	API_ENTRY(CPU, CX18_CPU_SET_AUDIO_PID,			0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_PID,			0),
	API_ENTRY(CPU, CX18_CPU_SET_VER_CROP_LINE,		0),
	API_ENTRY(CPU, CX18_CPU_SET_GOP_STRUCTURE,		0),
	API_ENTRY(CPU, CX18_CPU_SET_SCENE_CHANGE_DETECTION,	0),
	API_ENTRY(CPU, CX18_CPU_SET_ASPECT_RATIO,		0),
	API_ENTRY(CPU, CX18_CPU_SET_SKIP_INPUT_FRAME,		0),
	API_ENTRY(CPU, CX18_CPU_SET_SLICED_VBI_PARAM,		0),
	API_ENTRY(CPU, CX18_CPU_SET_USERDATA_PLACE_HOLDER,	0),
	API_ENTRY(CPU, CX18_CPU_GET_ENC_PTS,			0),
	API_ENTRY(CPU, CX18_CPU_DE_SET_MDL_ACK,			0),
	API_ENTRY(CPU, CX18_CPU_DE_SET_MDL,			API_FAST),
	API_ENTRY(CPU, CX18_CPU_DE_RELEASE_MDL,			API_SLOW),
	API_ENTRY(APU, CX18_APU_START,				0),
	API_ENTRY(APU, CX18_APU_STOP,				0),
	API_ENTRY(APU, CX18_APU_RESETAI,			0),
	API_ENTRY(CPU, CX18_CPU_DEBUG_PEEK32,			0),
	API_ENTRY(0, 0,						0),
};

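/*
 * Look up the api_info[] entry for a mailbox command.
 * Returns NULL if the command is not listed in the table above.
 */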
static const struct cx18_api_info *find_api_info(u32 cmd)
{
	int i;

	for (i = 0; api_info[i].cmd; i++)
		if (api_info[i].cmd == cmd)
			return &api_info[i];
	return NULL;
}

/* Call with buf of n*11+1 bytes */
static char *u32arr2hex(u32 data[], int n, char *buf)
{
	char *p;
	int i;

	for (i = 0, p = buf; i < n; i++, p += 11) {
		/* kernel snprintf() appends '\0' always */
		snprintf(p, 12, " %#010x", data[i]);
	}
	*p = '\0';
	return buf;
}

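/*
 * Log a mailbox's request, ack, cmd, error, and argument words.
 * Only emits output when API debugging (CX18_DBGFLG_API) is enabled.
 */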
static void dump_mb(struct cx18 *cx, struct cx18_mailbox *mb, char *name)
{
	char argstr[MAX_MB_ARGUMENTS*11+1];

	if (!(cx18_debug & CX18_DBGFLG_API))
		return;

	CX18_DEBUG_API("%s: req %#010x ack %#010x cmd %#010x err %#010x args%s"
		       "\n", name, mb->request, mb->ack, mb->cmd, mb->error,
		       u32arr2hex(mb->args, MAX_MB_ARGUMENTS, argstr));
}


/*
 * Functions that run in a work_queue work handling context
 */

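/*
 * Hand the payload of a completed MDL to the DVB demux: feed each buffer
 * in the MDL's buffer list to dvb_dmx_swfilter() until an empty buffer
 * is encountered.
 */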
static void cx18_mdl_send_to_dvb(struct cx18_stream *s, struct cx18_mdl *mdl)
{
	struct cx18_buffer *buf;

	if (s->dvb == NULL || !s->dvb->enabled || mdl->bytesused == 0)
		return;

	/* We ignore mdl and buf readpos accounting here - it doesn't matter */

	/* The likely case */
	if (list_is_singular(&mdl->buf_list)) {
		buf = list_first_entry(&mdl->buf_list, struct cx18_buffer,
				       list);
		if (buf->bytesused)
			dvb_dmx_swfilter(&s->dvb->demux,
					 buf->buf, buf->bytesused);
		return;
	}

	list_for_each_entry(buf, &mdl->buf_list, list) {
		if (buf->bytesused == 0)
			break;
		dvb_dmx_swfilter(&s->dvb->demux, buf->buf, buf->bytesused);
	}
}


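/*
 * Hand the payload of a completed MDL to the cx18-alsa module via the
 * registered pcm_announce_callback, one buffer at a time.
 */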
static void cx18_mdl_send_to_alsa(struct cx18 *cx, struct cx18_stream *s,
				  struct cx18_mdl *mdl)
{
	struct cx18_buffer *buf;

	if (mdl->bytesused == 0)
		return;

	/* We ignore mdl and buf readpos accounting here - it doesn't matter */

	/* The likely case */
	if (list_is_singular(&mdl->buf_list)) {
		buf = list_first_entry(&mdl->buf_list, struct cx18_buffer,
				       list);
		if (buf->bytesused)
			cx->pcm_announce_callback(cx->alsa, buf->buf,
						  buf->bytesused);
		return;
	}

	list_for_each_entry(buf, &mdl->buf_list, list) {
		if (buf->bytesused == 0)
			break;
		cx->pcm_announce_callback(cx->alsa, buf->buf, buf->bytesused);
	}
}

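/*
 * Second-stage (work handler) processing of a CX18_EPU_DMA_DONE order:
 * look up the stream for the mailbox's task handle, walk the copied
 * mdl_ack array, dispatch each returned MDL (to DVB, ALSA, or the
 * stream's full queue), and then give MDLs back to the firmware.
 */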
static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order)
{
	u32 handle, mdl_ack_count, id;
	struct cx18_mailbox *mb;
	struct cx18_mdl_ack *mdl_ack;
	struct cx18_stream *s;
	struct cx18_mdl *mdl;
	int i;

	mb = &order->mb;
	handle = mb->args[0];
	s = cx18_handle_to_stream(cx, handle);

	if (s == NULL) {
		CX18_WARN("Got DMA done notification for unknown/inactive"
			  " handle %d, %s mailbox seq no %d\n", handle,
			  (order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) ?
			  "stale" : "good", mb->request);
		return;
	}

	mdl_ack_count = mb->args[2];
	mdl_ack = order->mdl_ack;
	for (i = 0; i < mdl_ack_count; i++, mdl_ack++) {
		id = mdl_ack->id;
		/*
		 * Simple integrity check for processing a stale (and possibly
		 * inconsistent) mailbox: make sure the MDL id is in the
		 * valid range for the stream.
		 *
		 * We go through the trouble of dealing with stale mailboxes
		 * because most of the time, the mailbox data is still valid and
		 * unchanged (and in practice the firmware ping-pongs the
		 * two mdl_ack buffers so mdl_acks are not stale).
		 *
		 * There are occasions when we get a half changed mailbox,
		 * which this check catches for a handle & id mismatch.  If the
		 * handle and id do correspond, the worst case is that we
		 * completely lost the old MDL, but pick up the new MDL
		 * early (but the new mdl_ack is guaranteed to be good in this
		 * case as the firmware wouldn't point us to a new mdl_ack until
		 * it's filled in).
		 *
		 * cx18_queue_get_mdl() will detect the lost MDLs
		 * and send them back to q_free for fw rotation eventually.
		 */
		if ((order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) &&
		    !(id >= s->mdl_base_idx &&
		      id < (s->mdl_base_idx + s->buffers))) {
			CX18_WARN("Fell behind! Ignoring stale mailbox with "
				  "inconsistent data. Lost MDL for mailbox "
				  "seq no %d\n", mb->request);
			break;
		}
		mdl = cx18_queue_get_mdl(s, id, mdl_ack->data_used);

		CX18_DEBUG_HI_DMA("DMA DONE for %s (MDL %d)\n", s->name, id);
		if (mdl == NULL) {
			CX18_WARN("Could not find MDL %d for stream %s\n",
				  id, s->name);
			continue;
		}

		CX18_DEBUG_HI_DMA("%s recv bytesused = %d\n",
				  s->name, mdl->bytesused);

		if (s->type == CX18_ENC_STREAM_TYPE_TS) {
			cx18_mdl_send_to_dvb(s, mdl);
			cx18_enqueue(s, mdl, &s->q_free);
		} else if (s->type == CX18_ENC_STREAM_TYPE_PCM) {
			/* Pass the data to cx18-alsa */
			if (cx->pcm_announce_callback != NULL) {
				cx18_mdl_send_to_alsa(cx, s, mdl);
				cx18_enqueue(s, mdl, &s->q_free);
			} else {
				cx18_enqueue(s, mdl, &s->q_full);
			}
		} else {
			cx18_enqueue(s, mdl, &s->q_full);
			if (s->type == CX18_ENC_STREAM_TYPE_IDX)
				cx18_stream_rotate_idx_mdls(cx);
		}
	}
	/* Put as many MDLs as possible back into fw use */
	cx18_stream_load_fw_queue(s);

	wake_up(&cx->dma_waitq);
	if (s->id != -1)
		wake_up(&s->waitq);
}

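/*
 * Log a debug string sent by the firmware.  The string is also used to
 * report the firmware version once, before CX18_F_I_LOADED_FW is set.
 */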
static void epu_debug(struct cx18 *cx, struct cx18_in_work_order *order)
{
	char *p;
	char *str = order->str;

	CX18_DEBUG_INFO("%x %s\n", order->mb.args[0], str);
	p = strchr(str, '.');
	if (!test_bit(CX18_F_I_LOADED_FW, &cx->i_flags) && p && p > str)
		CX18_INFO("FW version: %s\n", p - 1);
}

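/* Dispatch a deferred incoming mailbox order to its work handler routine */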
static void epu_cmd(struct cx18 *cx, struct cx18_in_work_order *order)
{
	switch (order->rpu) {
	case CPU:
	{
		switch (order->mb.cmd) {
		case CX18_EPU_DMA_DONE:
			epu_dma_done(cx, order);
			break;
		case CX18_EPU_DEBUG:
			epu_debug(cx, order);
			break;
		default:
			CX18_WARN("Unknown CPU to EPU mailbox command %#0x\n",
				  order->mb.cmd);
			break;
		}
		break;
	}
	case APU:
		CX18_WARN("Unknown APU to EPU mailbox command %#0x\n",
			  order->mb.cmd);
		break;
	default:
		break;
	}
}

static
void free_in_work_order(struct cx18 *cx, struct cx18_in_work_order *order)
{
	atomic_set(&order->pending, 0);
}

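/* Work handler entry point: process the order, then mark it free for reuse */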
void cx18_in_work_handler(struct work_struct *work)
{
	struct cx18_in_work_order *order =
			container_of(work, struct cx18_in_work_order, work);
	struct cx18 *cx = order->cx;
	epu_cmd(cx, order);
	free_in_work_order(cx, order);
}


/*
 * Functions that run in an interrupt handling context
 */

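/*
 * Ack an incoming XPU to EPU mailbox, unless the sending processor has
 * already moved on and reused or self-ack'ed that mailbox, in which case
 * the order is flagged stale instead.
 */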
static void mb_ack_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
	struct cx18_mailbox __iomem *ack_mb;
	u32 ack_irq, req;

	switch (order->rpu) {
	case APU:
		ack_irq = IRQ_EPU_TO_APU_ACK;
		ack_mb = &cx->scb->apu2epu_mb;
		break;
	case CPU:
		ack_irq = IRQ_EPU_TO_CPU_ACK;
		ack_mb = &cx->scb->cpu2epu_mb;
		break;
	default:
		CX18_WARN("Unhandled RPU (%d) for command %x ack\n",
			  order->rpu, order->mb.cmd);
		return;
	}

	req = order->mb.request;
	/* Don't ack if the RPU has gotten impatient and timed us out */
	if (req != cx18_readl(cx, &ack_mb->request) ||
	    req == cx18_readl(cx, &ack_mb->ack)) {
		CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our "
				"incoming %s to EPU mailbox (sequence no. %u) "
				"while processing\n",
				rpu_str[order->rpu], rpu_str[order->rpu], req);
		order->flags |= CX18_F_EWO_MB_STALE_WHILE_PROC;
		return;
	}
	cx18_writel(cx, req, &ack_mb->ack);
	cx18_write_reg_expect(cx, ack_irq, SW2_INT_SET, ack_irq, ack_irq);
	return;
}

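/*
 * First-stage (IRQ context) handling of CX18_EPU_DMA_DONE: validate the
 * mailbox arguments, copy the mdl_ack array out of encoder memory, and
 * ack the mailbox if it isn't stale.  Returns > 0 if the order should be
 * queued for the work handler, negative otherwise.
 */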
static int epu_dma_done_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
	u32 handle, mdl_ack_offset, mdl_ack_count;
	struct cx18_mailbox *mb;

	mb = &order->mb;
	handle = mb->args[0];
	mdl_ack_offset = mb->args[1];
	mdl_ack_count = mb->args[2];

	if (handle == CX18_INVALID_TASK_HANDLE ||
	    mdl_ack_count == 0 || mdl_ack_count > CX18_MAX_MDL_ACKS) {
		if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
			mb_ack_irq(cx, order);
		return -1;
	}

	cx18_memcpy_fromio(cx, order->mdl_ack, cx->enc_mem + mdl_ack_offset,
			   sizeof(struct cx18_mdl_ack) * mdl_ack_count);

	if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
		mb_ack_irq(cx, order);
	return 1;
}

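/*
 * First-stage (IRQ context) handling of CX18_EPU_DEBUG: copy the debug
 * string out of encoder memory and ack the mailbox if it isn't stale.
 * Returns 1 if there is a string for the work handler to log, 0 otherwise.
 */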
static
int epu_debug_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
	u32 str_offset;
	char *str = order->str;

	str[0] = '\0';
	str_offset = order->mb.args[1];
	if (str_offset) {
		cx18_setup_page(cx, str_offset);
		cx18_memcpy_fromio(cx, str, cx->enc_mem + str_offset, 252);
		str[252] = '\0';
		cx18_setup_page(cx, SCB_OFFSET);
	}

	if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
		mb_ack_irq(cx, order);

	return str_offset ? 1 : 0;
}

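/* IRQ context dispatch of an incoming mailbox command to its first-stage handler */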
static inline
int epu_cmd_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
	int ret = -1;

	switch (order->rpu) {
	case CPU:
	{
		switch (order->mb.cmd) {
		case CX18_EPU_DMA_DONE:
			ret = epu_dma_done_irq(cx, order);
			break;
		case CX18_EPU_DEBUG:
			ret = epu_debug_irq(cx, order);
			break;
		default:
			CX18_WARN("Unknown CPU to EPU mailbox command %#0x\n",
				  order->mb.cmd);
			break;
		}
		break;
	}
	case APU:
		CX18_WARN("Unknown APU to EPU mailbox command %#0x\n",
			  order->mb.cmd);
		break;
	default:
		break;
	}
	return ret;
}

static inline
struct cx18_in_work_order *alloc_in_work_order_irq(struct cx18 *cx)
{
	int i;
	struct cx18_in_work_order *order = NULL;

	for (i = 0; i < CX18_MAX_IN_WORK_ORDERS; i++) {
		/*
		 * We only need "pending" atomic to inspect its contents,
		 * and need not do a check and set because:
		 * 1. Any work handler thread only clears "pending" and only
		 * on one, particular work order at a time, per handler thread.
		 * 2. "pending" is only set here, and we're serialized because
		 * we're called in an IRQ handler context.
		 */
		if (atomic_read(&cx->in_work_order[i].pending) == 0) {
			order = &cx->in_work_order[i];
			atomic_set(&order->pending, 1);
			break;
		}
	}
	return order;
}

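/*
 * Called from the IRQ handler for an incoming CPU/APU to EPU mailbox:
 * grab a free work order, snapshot the mailbox contents, flag the order
 * as stale if the sender has already self-ack'ed it, do the immediate
 * (IRQ context) part of the command, and queue the rest for the work
 * handler when needed.
 */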
void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
{
	struct cx18_mailbox __iomem *mb;
	struct cx18_mailbox *order_mb;
	struct cx18_in_work_order *order;
	int submit;

	switch (rpu) {
	case CPU:
		mb = &cx->scb->cpu2epu_mb;
		break;
	case APU:
		mb = &cx->scb->apu2epu_mb;
		break;
	default:
		return;
	}

	order = alloc_in_work_order_irq(cx);
	if (order == NULL) {
		CX18_WARN("Unable to find blank work order form to schedule "
			  "incoming mailbox command processing\n");
		return;
	}

	order->flags = 0;
	order->rpu = rpu;
	order_mb = &order->mb;

	/* mb->cmd and mb->args[0] through mb->args[2] */
	cx18_memcpy_fromio(cx, &order_mb->cmd, &mb->cmd, 4 * sizeof(u32));
	/* mb->request and mb->ack.  N.B. we want to read mb->ack last */
	cx18_memcpy_fromio(cx, &order_mb->request, &mb->request,
			   2 * sizeof(u32));

	if (order_mb->request == order_mb->ack) {
		CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our "
				"incoming %s to EPU mailbox (sequence no. %u)"
				"\n",
				rpu_str[rpu], rpu_str[rpu], order_mb->request);
		if (cx18_debug & CX18_DBGFLG_WARN)
			dump_mb(cx, order_mb, "incoming");
		order->flags = CX18_F_EWO_MB_STALE_UPON_RECEIPT;
	}

	/*
	 * Individual EPU command processing is responsible for ack-ing
	 * a non-stale mailbox as soon as possible
	 */
	submit = epu_cmd_irq(cx, order);
	if (submit > 0) {
		queue_work(cx->in_work_queue, &order->work);
	}
}


/*
 * Functions called from a non-interrupt, non work_queue context
 */

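/*
 * Send a command to the APU or CPU via the outgoing EPU mailbox and wait
 * for the ack, serialized per mailbox by mb_lock.  Returns 0 on success,
 * -EINVAL for an unknown command or a missing ack, and -EIO if the XPU
 * reported an error in the mailbox.
 */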
static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
{
	const struct cx18_api_info *info = find_api_info(cmd);
	u32 state, irq, req, ack, err;
	struct cx18_mailbox __iomem *mb;
	u32 __iomem *xpu_state;
	wait_queue_head_t *waitq;
	struct mutex *mb_lock;
	unsigned long int t0, timeout, ret;
	int i;
	char argstr[MAX_MB_ARGUMENTS*11+1];
	DEFINE_WAIT(w);

	if (info == NULL) {
		CX18_WARN("unknown cmd %x\n", cmd);
		return -EINVAL;
	}

	if (cx18_debug & CX18_DBGFLG_API) { /* only call u32arr2hex if needed */
		if (cmd == CX18_CPU_DE_SET_MDL) {
			if (cx18_debug & CX18_DBGFLG_HIGHVOL)
				CX18_DEBUG_HI_API("%s\tcmd %#010x args%s\n",
						info->name, cmd,
						u32arr2hex(data, args, argstr));
		} else
			CX18_DEBUG_API("%s\tcmd %#010x args%s\n",
				       info->name, cmd,
				       u32arr2hex(data, args, argstr));
	}

	switch (info->rpu) {
	case APU:
		waitq = &cx->mb_apu_waitq;
		mb_lock = &cx->epu2apu_mb_lock;
		irq = IRQ_EPU_TO_APU;
		mb = &cx->scb->epu2apu_mb;
		xpu_state = &cx->scb->apu_state;
		break;
	case CPU:
		waitq = &cx->mb_cpu_waitq;
		mb_lock = &cx->epu2cpu_mb_lock;
		irq = IRQ_EPU_TO_CPU;
		mb = &cx->scb->epu2cpu_mb;
		xpu_state = &cx->scb->cpu_state;
		break;
	default:
		CX18_WARN("Unknown RPU (%d) for API call\n", info->rpu);
		return -EINVAL;
	}

	mutex_lock(mb_lock);
	/*
	 * Wait for an in-use mailbox to complete
	 *
	 * If the XPU is responding with Ack's, the mailbox shouldn't be in
	 * a busy state, since we serialize access to it on our end.
	 *
	 * If the wait for ack after sending a previous command was interrupted
	 * by a signal, we may get here and find a busy mailbox.  After waiting,
	 * mark it "not busy" from our end, if the XPU hasn't ack'ed it still.
	 */
	state = cx18_readl(cx, xpu_state);
	req = cx18_readl(cx, &mb->request);
	timeout = msecs_to_jiffies(10);
	ret = wait_event_timeout(*waitq,
				 (ack = cx18_readl(cx, &mb->ack)) == req,
				 timeout);
	if (req != ack) {
		/* waited long enough, make the mbox "not busy" from our end */
		cx18_writel(cx, req, &mb->ack);
		CX18_ERR("mbox was found stuck busy when setting up for %s; "
			 "clearing busy and trying to proceed\n", info->name);
	} else if (ret != timeout)
		CX18_DEBUG_API("waited %u msecs for busy mbox to be acked\n",
			       jiffies_to_msecs(timeout-ret));

	/* Build the outgoing mailbox */
	req = ((req & 0xfffffffe) == 0xfffffffe) ? 1 : req + 1;

	cx18_writel(cx, cmd, &mb->cmd);
	for (i = 0; i < args; i++)
		cx18_writel(cx, data[i], &mb->args[i]);
	cx18_writel(cx, 0, &mb->error);
	cx18_writel(cx, req, &mb->request);
	cx18_writel(cx, req - 1, &mb->ack); /* ensure ack & req are distinct */

	/*
	 * Notify the XPU and wait for it to send an Ack back
	 */
	timeout = msecs_to_jiffies((info->flags & API_FAST) ? 10 : 20);

	CX18_DEBUG_HI_IRQ("sending interrupt SW1: %x to send %s\n",
			  irq, info->name);

	/* So we don't miss the wakeup, prepare to wait before notifying fw */
	prepare_to_wait(waitq, &w, TASK_UNINTERRUPTIBLE);
	cx18_write_reg_expect(cx, irq, SW1_INT_SET, irq, irq);

	t0 = jiffies;
	ack = cx18_readl(cx, &mb->ack);
	if (ack != req) {
		schedule_timeout(timeout);
		ret = jiffies - t0;
		ack = cx18_readl(cx, &mb->ack);
	} else {
		ret = jiffies - t0;
	}

	finish_wait(waitq, &w);

	if (req != ack) {
		mutex_unlock(mb_lock);
		if (ret >= timeout) {
			/* Timed out */
			CX18_DEBUG_WARN("sending %s timed out waiting %d msecs "
					"for RPU acknowledgement\n",
					info->name, jiffies_to_msecs(ret));
		} else {
			CX18_DEBUG_WARN("woken up before mailbox ack was ready "
					"after submitting %s to RPU.  only "
					"waited %d msecs on req %u but awakened"
					" with unmatched ack %u\n",
					info->name,
					jiffies_to_msecs(ret),
					req, ack);
		}
		return -EINVAL;
	}

	if (ret >= timeout)
		CX18_DEBUG_WARN("failed to be awakened upon RPU acknowledgment "
				"sending %s; timed out waiting %d msecs\n",
				info->name, jiffies_to_msecs(ret));
	else
		CX18_DEBUG_HI_API("waited %u msecs for %s to be acked\n",
				  jiffies_to_msecs(ret), info->name);

	/* Collect data returned by the XPU */
	for (i = 0; i < MAX_MB_ARGUMENTS; i++)
		data[i] = cx18_readl(cx, &mb->args[i]);
	err = cx18_readl(cx, &mb->error);
	mutex_unlock(mb_lock);

	/*
	 * Wait for XPU to perform extra actions for the caller in some cases.
	 * e.g. CX18_CPU_DE_RELEASE_MDL will cause the CPU to send all MDLs
	 * back in a burst shortly thereafter
	 */
	if (info->flags & API_SLOW)
		cx18_msleep_timeout(300, 0);

	if (err)
		CX18_DEBUG_API("mailbox error %08x for command %s\n", err,
				info->name);
	return err ? -EIO : 0;
}

int cx18_api(struct cx18 *cx, u32 cmd, int args, u32 data[])
{
	return cx18_api_call(cx, cmd, args, data);
}

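/*
 * Program the encoder's spatial, temporal, and median filter modes and
 * strengths from the driver's cached filter_mode and strength settings.
 */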
static int cx18_set_filter_param(struct cx18_stream *s)
{
	struct cx18 *cx = s->cx;
	u32 mode;
	int ret;

	mode = (cx->filter_mode & 1) ? 2 : (cx->spatial_strength ? 1 : 0);
	ret = cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
			s->handle, 1, mode, cx->spatial_strength);
	mode = (cx->filter_mode & 2) ? 2 : (cx->temporal_strength ? 1 : 0);
	ret = ret ? ret : cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
			s->handle, 0, mode, cx->temporal_strength);
	ret = ret ? ret : cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
			s->handle, 2, cx->filter_mode >> 2, 0);
	return ret;
}

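/*
 * Translate generic cx2341x encoder API commands into the corresponding
 * CX18_CPU_* mailbox calls for this stream.  Commands with no CX23418
 * counterpart simply return 0.
 */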
int cx18_api_func(void *priv, u32 cmd, int in, int out,
		u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct cx18_api_func_private *api_priv = priv;
	struct cx18 *cx = api_priv->cx;
	struct cx18_stream *s = api_priv->s;

	switch (cmd) {
	case CX2341X_ENC_SET_OUTPUT_PORT:
		return 0;
	case CX2341X_ENC_SET_FRAME_RATE:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_IN, 6,
				s->handle, 0, 0, 0, 0, data[0]);
	case CX2341X_ENC_SET_FRAME_SIZE:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_RESOLUTION, 3,
				s->handle, data[1], data[0]);
	case CX2341X_ENC_SET_STREAM_TYPE:
		return cx18_vapi(cx, CX18_CPU_SET_STREAM_OUTPUT_TYPE, 2,
				s->handle, data[0]);
	case CX2341X_ENC_SET_ASPECT_RATIO:
		return cx18_vapi(cx, CX18_CPU_SET_ASPECT_RATIO, 2,
				s->handle, data[0]);

	case CX2341X_ENC_SET_GOP_PROPERTIES:
		return cx18_vapi(cx, CX18_CPU_SET_GOP_STRUCTURE, 3,
				s->handle, data[0], data[1]);
	case CX2341X_ENC_SET_GOP_CLOSURE:
		return 0;
	case CX2341X_ENC_SET_AUDIO_PROPERTIES:
		return cx18_vapi(cx, CX18_CPU_SET_AUDIO_PARAMETERS, 2,
				s->handle, data[0]);
	case CX2341X_ENC_MUTE_AUDIO:
		return cx18_vapi(cx, CX18_CPU_SET_AUDIO_MUTE, 2,
				s->handle, data[0]);
	case CX2341X_ENC_SET_BIT_RATE:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_RATE, 5,
				s->handle, data[0], data[1], data[2], data[3]);
	case CX2341X_ENC_MUTE_VIDEO:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_MUTE, 2,
				s->handle, data[0]);
	case CX2341X_ENC_SET_FRAME_DROP_RATE:
		return cx18_vapi(cx, CX18_CPU_SET_SKIP_INPUT_FRAME, 2,
				s->handle, data[0]);
	case CX2341X_ENC_MISC:
		return cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 4,
				s->handle, data[0], data[1], data[2]);
	case CX2341X_ENC_SET_DNR_FILTER_MODE:
		cx->filter_mode = (data[0] & 3) | (data[1] << 2);
		return cx18_set_filter_param(s);
	case CX2341X_ENC_SET_DNR_FILTER_PROPS:
		cx->spatial_strength = data[0];
		cx->temporal_strength = data[1];
		return cx18_set_filter_param(s);
	case CX2341X_ENC_SET_SPATIAL_FILTER_TYPE:
		return cx18_vapi(cx, CX18_CPU_SET_SPATIAL_FILTER_TYPE, 3,
				s->handle, data[0], data[1]);
	case CX2341X_ENC_SET_CORING_LEVELS:
		return cx18_vapi(cx, CX18_CPU_SET_MEDIAN_CORING, 5,
				s->handle, data[0], data[1], data[2], data[3]);
	}
	CX18_WARN("Unknown cmd %x\n", cmd);
	return 0;
}

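/*
 * Variadic wrapper around cx18_api() that also returns the mailbox
 * argument words from the XPU to the caller via data[].
 */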
int cx18_vapi_result(struct cx18 *cx, u32 data[MAX_MB_ARGUMENTS],
		u32 cmd, int args, ...)
{
	va_list ap;
	int i;

	va_start(ap, args);
	for (i = 0; i < args; i++)
		data[i] = va_arg(ap, u32);
	va_end(ap);
	return cx18_api(cx, cmd, args, data);
}

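/*
 * Variadic convenience wrapper around cx18_api() for callers that do not
 * need the returned mailbox arguments.  For example, mirroring the call
 * made in cx18_api_func() above ('mute' here is just an illustrative
 * caller-supplied flag):
 *
 *	cx18_vapi(cx, CX18_CPU_SET_AUDIO_MUTE, 2, s->handle, mute);
 */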
int cx18_vapi(struct cx18 *cx, u32 cmd, int args, ...)
{
	u32 data[MAX_MB_ARGUMENTS];
	va_list ap;
	int i;

	if (cx == NULL) {
		CX18_ERR("cx == NULL (cmd=%x)\n", cmd);
		return 0;
	}
	if (args > MAX_MB_ARGUMENTS) {
		CX18_ERR("args too big (cmd=%x)\n", cmd);
		args = MAX_MB_ARGUMENTS;
	}
	va_start(ap, args);
	for (i = 0; i < args; i++)
		data[i] = va_arg(ap, u32);
	va_end(ap);
	return cx18_api(cx, cmd, args, data);
}
