cx18-mailbox.c revision 2bb49f1b9f6a4f50222bc8a6b1e9df87a432c52c
/*
 *  cx18 mailbox functions
 *
 *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
 *  02111-1307  USA
 */

#include <stdarg.h>

#include "cx18-driver.h"
#include "cx18-io.h"
#include "cx18-scb.h"
#include "cx18-irq.h"
#include "cx18-mailbox.h"
#include "cx18-queue.h"
#include "cx18-streams.h"

/* Printable names for the processing units, indexed by the APU/CPU/EPU/HPU
 * rpu enumeration values used throughout this file */
static const char *rpu_str[] = { "APU", "CPU", "EPU", "HPU" };

#define API_FAST (1 << 2) /* Short timeout */
#define API_SLOW (1 << 3) /* Additional 300ms timeout */

/* Per-command metadata used when sending an API command to a firmware unit */
struct cx18_api_info {
	u32 cmd;		/* Firmware command code */
	u8 flags;		/* Flags, see above */
	u8 rpu;			/* Processing unit */
	const char *name;	/* The name of the command */
};

/* Build an api_info entry; stringizes the command macro for its name */
#define API_ENTRY(rpu, x, f) { (x), (f), (rpu), #x }

/* Table of all known API commands; terminated by an all-zero sentinel entry */
static const struct cx18_api_info api_info[] = {
	/* MPEG encoder API */
	API_ENTRY(CPU, CX18_CPU_SET_CHANNEL_TYPE, 0),
	API_ENTRY(CPU, CX18_EPU_DEBUG, 0),
	API_ENTRY(CPU, CX18_CREATE_TASK, 0),
	API_ENTRY(CPU, CX18_DESTROY_TASK, 0),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_START, API_SLOW),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_STOP, API_SLOW),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_PAUSE, 0),
	API_ENTRY(CPU, CX18_CPU_CAPTURE_RESUME, 0),
	/* NOTE(review): CX18_CPU_SET_CHANNEL_TYPE appears twice in this
	 * table; the duplicate is harmless since lookup stops at the first
	 * match, but it could be removed. */
	API_ENTRY(CPU, CX18_CPU_SET_CHANNEL_TYPE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_STREAM_OUTPUT_TYPE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_IN, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_RATE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_RESOLUTION, 0),
	API_ENTRY(CPU, CX18_CPU_SET_FILTER_PARAM, 0),
	API_ENTRY(CPU, CX18_CPU_SET_SPATIAL_FILTER_TYPE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_MEDIAN_CORING, 0),
	API_ENTRY(CPU, CX18_CPU_SET_INDEXTABLE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_AUDIO_PARAMETERS, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_MUTE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_AUDIO_MUTE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_MISC_PARAMETERS, 0),
	API_ENTRY(CPU, CX18_CPU_SET_RAW_VBI_PARAM, API_SLOW),
	API_ENTRY(CPU, CX18_CPU_SET_CAPTURE_LINE_NO, 0),
	API_ENTRY(CPU, CX18_CPU_SET_COPYRIGHT, 0),
	API_ENTRY(CPU, CX18_CPU_SET_AUDIO_PID, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VIDEO_PID, 0),
	API_ENTRY(CPU, CX18_CPU_SET_VER_CROP_LINE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_GOP_STRUCTURE, 0),
	API_ENTRY(CPU, CX18_CPU_SET_SCENE_CHANGE_DETECTION, 0),
	API_ENTRY(CPU, CX18_CPU_SET_ASPECT_RATIO, 0),
	API_ENTRY(CPU, CX18_CPU_SET_SKIP_INPUT_FRAME, 0),
	API_ENTRY(CPU, CX18_CPU_SET_SLICED_VBI_PARAM, 0),
	API_ENTRY(CPU, CX18_CPU_SET_USERDATA_PLACE_HOLDER, 0),
	API_ENTRY(CPU, CX18_CPU_GET_ENC_PTS, 0),
	API_ENTRY(CPU, CX18_CPU_DE_SET_MDL_ACK, 0),
	API_ENTRY(CPU, CX18_CPU_DE_SET_MDL, API_FAST),
	API_ENTRY(CPU, CX18_APU_RESETAI, API_FAST),
	API_ENTRY(CPU, CX18_CPU_DE_RELEASE_MDL, API_SLOW),
	API_ENTRY(0, 0, 0),
};

/*
 * Linear search of api_info[] for the descriptor of the given command code.
 * Returns NULL if the command is not in the table.
 */
static const struct cx18_api_info *find_api_info(u32 cmd)
{
	int i;

	for (i = 0; api_info[i].cmd; i++)
		if (api_info[i].cmd == cmd)
			return &api_info[i];
	return NULL;
}

/*
 * Emit a debug log line showing the full contents of a mailbox (request,
 * ack, cmd, error, and all arguments).  Does nothing unless the API debug
 * flag is enabled.  'name' tags the direction/purpose, e.g. "incoming".
 */
static void dump_mb(struct cx18 *cx, struct cx18_mailbox *mb, char *name)
{
	/* 11 chars per formatted argument (" %#010x") plus terminating NUL */
	char argstr[MAX_MB_ARGUMENTS*11+1];
	char *p;
	int i;

	if (!(cx18_debug & CX18_DBGFLG_API))
		return;

	for (i = 0, p = argstr; i < MAX_MB_ARGUMENTS; i++, p += 11) {
		/* kernel snprintf() appends '\0' always; each iteration's
		 * NUL is overwritten by the next, the last one terminates */
		snprintf(p, 12, " %#010x", mb->args[i]);
	}
	CX18_DEBUG_API("%s: req %#010x ack %#010x cmd %#010x err %#010x args%s"
		       "\n", name, mb->request, mb->ack, mb->cmd, mb->error,
		       argstr);
}


/*
 * Functions that run in a work_queue work handling context
 */

/*
 * Handle a CX18_EPU_DMA_DONE notification from the firmware: for each
 * mdl_ack snapshotted by the IRQ handler, retrieve the corresponding buffer,
 * sync it for the CPU, and either feed it to the DVB demux (TS stream) or
 * flag it for byte swapping, then wake up waiters.
 */
static void epu_dma_done(struct cx18 *cx, struct cx18_epu_work_order *order)
{
	u32 handle, mdl_ack_count, id;
	struct cx18_mailbox *mb;
	struct cx18_mdl_ack *mdl_ack;
	struct cx18_stream *s;
	struct cx18_buffer *buf;
	int i;

	mb = &order->mb;
	handle = mb->args[0];
	s = cx18_handle_to_stream(cx, handle);

	if (s == NULL) {
		CX18_WARN("Got DMA done notification for unknown/inactive"
			  " handle %d, %s mailbox seq no %d\n", handle,
			  (order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) ?
			  "stale" : "good", mb->request);
		return;
	}

	mdl_ack_count = mb->args[2];
	mdl_ack = order->mdl_ack;
	for (i = 0; i < mdl_ack_count; i++, mdl_ack++) {
		id = mdl_ack->id;
		/*
		 * Simple integrity check for processing a stale (and possibly
		 * inconsistent mailbox): make sure the buffer id is in the
		 * valid range for the stream.
		 *
		 * We go through the trouble of dealing with stale mailboxes
		 * because most of the time, the mailbox data is still valid and
		 * unchanged (and in practice the firmware ping-pongs the
		 * two mdl_ack buffers so mdl_acks are not stale).
		 *
		 * There are occasions when we get a half changed mailbox,
		 * which this check catches for a handle & id mismatch.  If the
		 * handle and id do correspond, the worst case is that we
		 * completely lost the old buffer, but pick up the new buffer
		 * early (but the new mdl_ack is guaranteed to be good in this
		 * case as the firmware wouldn't point us to a new mdl_ack until
		 * it's filled in).
		 *
		 * cx18_queue_get_buf() will detect the lost buffers
		 * and put them back in rotation eventually.
		 */
		if ((order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) &&
		    !(id >= s->mdl_offset &&
		      id < (s->mdl_offset + s->buffers))) {
			CX18_WARN("Fell behind! Ignoring stale mailbox with "
				  " inconsistent data. Lost buffer for mailbox "
				  "seq no %d\n", mb->request);
			break;
		}
		buf = cx18_queue_get_buf(s, id, mdl_ack->data_used);
		CX18_DEBUG_HI_DMA("DMA DONE for %s (buffer %d)\n", s->name, id);
		if (buf == NULL) {
			CX18_WARN("Could not find buf %d for stream %s\n",
				  id, s->name);
			continue;
		}

		cx18_buf_sync_for_cpu(s, buf);
		if (s->type == CX18_ENC_STREAM_TYPE_TS && s->dvb.enabled) {
			CX18_DEBUG_HI_DMA("TS recv bytesused = %d\n",
					  buf->bytesused);

			/* Pass the TS data directly to the DVB demux */
			dvb_dmx_swfilter(&s->dvb.demux, buf->buf,
					 buf->bytesused);

			/* Buffer is consumed here; hand it straight back to
			 * the firmware's free rotation */
			cx18_buf_sync_for_device(s, buf);
			cx18_enqueue(s, buf, &s->q_free);

			if (s->handle != CX18_INVALID_TASK_HANDLE &&
			    test_bit(CX18_F_S_STREAMING, &s->s_flags))
				cx18_vapi(cx,
					  CX18_CPU_DE_SET_MDL, 5, s->handle,
					  (void __iomem *)
					  &cx->scb->cpu_mdl[buf->id] -
					  cx->enc_mem,
					  1, buf->id, s->buf_size);
		} else
			set_bit(CX18_F_B_NEED_BUF_SWAP, &buf->b_flags);
	}
	wake_up(&cx->dma_waitq);
	if (s->id != -1)
		wake_up(&s->waitq);
}

/*
 * Handle a CX18_EPU_DEBUG message: log the firmware's debug string, and on
 * the first message before the firmware-loaded flag is set, report the
 * firmware version parsed out of the string (text around the first '.').
 */
static void epu_debug(struct cx18 *cx, struct cx18_epu_work_order *order)
{
	char *p;
	char *str = order->str;

	CX18_DEBUG_INFO("%x %s\n", order->mb.args[0], str);
	p = strchr(str, '.');
	if (!test_bit(CX18_F_I_LOADED_FW, &cx->i_flags) && p && p > str)
		CX18_INFO("FW version: %s\n", p - 1);
}

/*
 * Dispatch a deferred mailbox work order to its handler based on the
 * originating processing unit and command code.
 */
static void epu_cmd(struct cx18 *cx, struct cx18_epu_work_order *order)
{
	switch (order->rpu) {
	case CPU:
	{
		switch (order->mb.cmd) {
		case CX18_EPU_DMA_DONE:
			epu_dma_done(cx, order);
			break;
		case CX18_EPU_DEBUG:
			epu_debug(cx, order);
			break;
		default:
			CX18_WARN("Unknown CPU to EPU mailbox command %#0x\n",
				  order->mb.cmd);
			break;
		}
		break;
	}
	case APU:
		/* No APU to EPU commands are currently handled */
		CX18_WARN("Unknown APU to EPU mailbox command %#0x\n",
			  order->mb.cmd);
		break;
	default:
		break;
	}
}

/*
 * Return a work order to the free pool.  Clearing "pending" is sufficient:
 * see the allocation-side comment in alloc_epu_work_order_irq().
 */
static
void free_epu_work_order(struct cx18 *cx, struct cx18_epu_work_order *order)
{
	atomic_set(&order->pending, 0);
}

/*
 * Work queue handler entry point: process the mailbox work order queued by
 * the IRQ handler, then release the order for reuse.
 */
void cx18_epu_work_handler(struct work_struct *work)
{
	struct cx18_epu_work_order *order =
		container_of(work, struct cx18_epu_work_order, work);
	struct cx18 *cx = order->cx;
	epu_cmd(cx, order);
	free_epu_work_order(cx, order);
}


/*
 * Functions that run in an interrupt handling context
 */

/*
 * Acknowledge an incoming mailbox back to the XPU that sent it, by writing
 * the request sequence number into the ack field and raising the ack IRQ.
 * If the XPU already self-ack'ed or reused the mailbox (we fell behind),
 * mark the order stale instead of ack'ing.
 */
static void mb_ack_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
{
	struct cx18_mailbox __iomem *ack_mb;
	u32 ack_irq, req;

	switch (order->rpu) {
	case APU:
		ack_irq = IRQ_EPU_TO_APU_ACK;
		ack_mb = &cx->scb->apu2epu_mb;
		break;
	case CPU:
		ack_irq = IRQ_EPU_TO_CPU_ACK;
		ack_mb = &cx->scb->cpu2epu_mb;
		break;
	default:
		CX18_WARN("Unhandled RPU (%d) for command %x ack\n",
			  order->rpu, order->mb.cmd);
		return;
	}

	req = order->mb.request;
	/* Don't ack if the RPU has gotten impatient and timed us out */
	if (req != cx18_readl(cx, &ack_mb->request) ||
	    req == cx18_readl(cx, &ack_mb->ack)) {
		CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our "
				"incoming %s to EPU mailbox (sequence no. %u) "
				"while processing\n",
				rpu_str[order->rpu], rpu_str[order->rpu], req);
		order->flags |= CX18_F_EWO_MB_STALE_WHILE_PROC;
		return;
	}
	cx18_writel(cx, req, &ack_mb->ack);
	cx18_write_reg_expect(cx, ack_irq, SW2_INT_SET, ack_irq, ack_irq);
	return;
}

/*
 * IRQ-context part of DMA-done handling: validate the handle and mdl_ack
 * count, snapshot the mdl_ack array out of encoder memory into the work
 * order, and ack the mailbox.  Returns 1 to request work queue submission,
 * -1 on invalid arguments.
 */
static int epu_dma_done_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
{
	u32 handle, mdl_ack_offset, mdl_ack_count;
	struct cx18_mailbox *mb;

	mb = &order->mb;
	handle = mb->args[0];
	mdl_ack_offset = mb->args[1];
	mdl_ack_count = mb->args[2];

	if (handle == CX18_INVALID_TASK_HANDLE ||
	    mdl_ack_count == 0 || mdl_ack_count > CX18_MAX_MDL_ACKS) {
		if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
			mb_ack_irq(cx, order);
		return -1;
	}

	/* Copy the mdl_ack entries now, before the firmware can reuse them */
	cx18_memcpy_fromio(cx, order->mdl_ack, cx->enc_mem + mdl_ack_offset,
			   sizeof(struct cx18_mdl_ack) * mdl_ack_count);

	if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
		mb_ack_irq(cx, order);
	return 1;
}

/*
 * IRQ-context part of debug-message handling: snapshot up to 252 bytes of
 * the firmware's debug string into the work order and ack the mailbox.
 * Returns 1 to request work queue submission, 0 if there was no string.
 * NOTE(review): assumes order->str can hold at least 253 bytes — confirm
 * against the cx18_epu_work_order definition.
 */
static
int epu_debug_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
{
	u32 str_offset;
	char *str = order->str;

	str[0] = '\0';
	str_offset = order->mb.args[1];
	if (str_offset) {
		/* The string lives outside the SCB page; remap, copy,
		 * then restore the SCB mapping */
		cx18_setup_page(cx, str_offset);
		cx18_memcpy_fromio(cx, str, cx->enc_mem + str_offset, 252);
		str[252] = '\0';
		cx18_setup_page(cx, SCB_OFFSET);
	}

	if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
		mb_ack_irq(cx, order);

	return str_offset ? 1 : 0;
}

/*
 * IRQ-context dispatch for an incoming mailbox command.  Returns > 0 if the
 * work order should be submitted to the work queue, <= 0 otherwise.
 */
static inline
int epu_cmd_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
{
	int ret = -1;

	switch (order->rpu) {
	case CPU:
	{
		switch (order->mb.cmd) {
		case CX18_EPU_DMA_DONE:
			ret = epu_dma_done_irq(cx, order);
			break;
		case CX18_EPU_DEBUG:
			ret = epu_debug_irq(cx, order);
			break;
		default:
			CX18_WARN("Unknown CPU to EPU mailbox command %#0x\n",
				  order->mb.cmd);
			break;
		}
		break;
	}
	case APU:
		CX18_WARN("Unknown APU to EPU mailbox command %#0x\n",
			  order->mb.cmd);
		break;
	default:
		break;
	}
	return ret;
}

/*
 * Grab a free work order from the fixed pool, or NULL if all are pending.
 */
static inline
struct cx18_epu_work_order *alloc_epu_work_order_irq(struct cx18 *cx)
{
	int i;
	struct cx18_epu_work_order *order = NULL;

	for (i = 0; i < CX18_MAX_EPU_WORK_ORDERS; i++) {
		/*
		 * We only need "pending" atomic to inspect its contents,
		 * and need not do a check and set because:
		 * 1. Any work handler thread only clears "pending" and only
		 * on one, particular work order at a time, per handler thread.
		 * 2. "pending" is only set here, and we're serialized because
		 * we're called in an IRQ handler context.
		 */
		if (atomic_read(&cx->epu_work_order[i].pending) == 0) {
			order = &cx->epu_work_order[i];
			atomic_set(&order->pending, 1);
			break;
		}
	}
	return order;
}

/*
 * Top-level IRQ handler for an incoming mailbox from an XPU: snapshot the
 * mailbox into a work order, detect staleness, do the IRQ-context part of
 * the command, and queue the rest for the work handler if needed.
 */
void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
{
	struct cx18_mailbox __iomem *mb;
	struct cx18_mailbox *order_mb;
	struct cx18_epu_work_order *order;
	int submit;

	switch (rpu) {
	case CPU:
		mb = &cx->scb->cpu2epu_mb;
		break;
	case APU:
		mb = &cx->scb->apu2epu_mb;
		break;
	default:
		return;
	}

	order = alloc_epu_work_order_irq(cx);
	if (order == NULL) {
		CX18_WARN("Unable to find blank work order form to schedule "
			  "incoming mailbox command processing\n");
		return;
	}

	order->flags = 0;
	order->rpu = rpu;
	order_mb = &order->mb;

	/* mb->cmd and mb->args[0] through mb->args[2] */
	cx18_memcpy_fromio(cx, &order_mb->cmd, &mb->cmd, 4 * sizeof(u32));
	/* mb->request and mb->ack. N.B. we want to read mb->ack last */
	cx18_memcpy_fromio(cx, &order_mb->request, &mb->request,
			   2 * sizeof(u32));

	/* request == ack means the sender already self-ack'ed; the snapshot
	 * may be inconsistent, so flag it stale */
	if (order_mb->request == order_mb->ack) {
		CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our "
				"incoming %s to EPU mailbox (sequence no. %u)"
				"\n",
				rpu_str[rpu], rpu_str[rpu], order_mb->request);
		dump_mb(cx, order_mb, "incoming");
		order->flags = CX18_F_EWO_MB_STALE_UPON_RECEIPT;
	}

	/*
	 * Individual EPU command processing is responsible for ack-ing
	 * a non-stale mailbox as soon as possible
	 */
	submit = epu_cmd_irq(cx, order);
	if (submit > 0) {
		queue_work(cx18_work_queue, &order->work);
	}
}


/*
 * Functions called from a non-interrupt, non work_queue context
 */

/*
 * Send an API command to a firmware processing unit via its outgoing
 * mailbox, wait for the ack, and collect the returned arguments into
 * data[].  Serialized per-mailbox via mb_lock.
 *
 * Returns 0 on success, -EINVAL for unknown commands/RPUs or on ack
 * timeout, -EIO if the firmware reported a mailbox error.
 */
static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
{
	const struct cx18_api_info *info = find_api_info(cmd);
	u32 state, irq, req, ack, err;
	struct cx18_mailbox __iomem *mb;
	u32 __iomem *xpu_state;
	wait_queue_head_t *waitq;
	struct mutex *mb_lock;
	long int timeout, ret;
	int i;

	if (info == NULL) {
		CX18_WARN("unknown cmd %x\n", cmd);
		return -EINVAL;
	}

	/* CX18_CPU_DE_SET_MDL is sent constantly; log it at a higher
	 * debug level to avoid flooding the API debug log */
	if (cmd == CX18_CPU_DE_SET_MDL)
		CX18_DEBUG_HI_API("%s\n", info->name);
	else
		CX18_DEBUG_API("%s\n", info->name);

	switch (info->rpu) {
	case APU:
		waitq = &cx->mb_apu_waitq;
		mb_lock = &cx->epu2apu_mb_lock;
		irq = IRQ_EPU_TO_APU;
		mb = &cx->scb->epu2apu_mb;
		xpu_state = &cx->scb->apu_state;
		break;
	case CPU:
		waitq = &cx->mb_cpu_waitq;
		mb_lock = &cx->epu2cpu_mb_lock;
		irq = IRQ_EPU_TO_CPU;
		mb = &cx->scb->epu2cpu_mb;
		xpu_state = &cx->scb->cpu_state;
		break;
	default:
		CX18_WARN("Unknown RPU (%d) for API call\n", info->rpu);
		return -EINVAL;
	}

	mutex_lock(mb_lock);
	/*
	 * Wait for an in-use mailbox to complete
	 *
	 * If the XPU is responding with Ack's, the mailbox shouldn't be in
	 * a busy state, since we serialize access to it on our end.
	 *
	 * If the wait for ack after sending a previous command was interrupted
	 * by a signal, we may get here and find a busy mailbox.  After waiting,
	 * mark it "not busy" from our end, if the XPU hasn't ack'ed it still.
	 */
	/* NOTE(review): 'state' is read here but never used afterwards */
	state = cx18_readl(cx, xpu_state);
	req = cx18_readl(cx, &mb->request);
	timeout = msecs_to_jiffies(10);
	ret = wait_event_timeout(*waitq,
				 (ack = cx18_readl(cx, &mb->ack)) == req,
				 timeout);
	if (req != ack) {
		/* waited long enough, make the mbox "not busy" from our end */
		cx18_writel(cx, req, &mb->ack);
		CX18_ERR("mbox was found stuck busy when setting up for %s; "
			 "clearing busy and trying to proceed\n", info->name);
	} else if (ret != timeout)
		CX18_DEBUG_API("waited %u msecs for busy mbox to be acked\n",
			       jiffies_to_msecs(timeout-ret));

	/* Build the outgoing mailbox */
	/* Bump the sequence number, skipping the reserved values
	 * 0xfffffffe/0xffffffff (and 0) by wrapping back to 1 */
	req = ((req & 0xfffffffe) == 0xfffffffe) ? 1 : req + 1;

	cx18_writel(cx, cmd, &mb->cmd);
	for (i = 0; i < args; i++)
		cx18_writel(cx, data[i], &mb->args[i]);
	cx18_writel(cx, 0, &mb->error);
	cx18_writel(cx, req, &mb->request);
	cx18_writel(cx, req - 1, &mb->ack); /* ensure ack & req are distinct */

	/*
	 * Notify the XPU and wait for it to send an Ack back
	 */
	timeout = msecs_to_jiffies((info->flags & API_FAST) ? 10 : 20);

	CX18_DEBUG_HI_IRQ("sending interrupt SW1: %x to send %s\n",
			  irq, info->name);
	cx18_write_reg_expect(cx, irq, SW1_INT_SET, irq, irq);

	ret = wait_event_timeout(
		       *waitq,
		       cx18_readl(cx, &mb->ack) == cx18_readl(cx, &mb->request),
		       timeout);

	if (ret == 0) {
		/* Timed out */
		mutex_unlock(mb_lock);
		CX18_WARN("sending %s timed out waiting %d msecs for RPU "
			  "acknowledgement\n",
			  info->name, jiffies_to_msecs(timeout));
		return -EINVAL;
	}

	if (ret != timeout)
		CX18_DEBUG_HI_API("waited %u msecs for %s to be acked\n",
				  jiffies_to_msecs(timeout-ret), info->name);

	/* Collect data returned by the XPU */
	for (i = 0; i < MAX_MB_ARGUMENTS; i++)
		data[i] = cx18_readl(cx, &mb->args[i]);
	err = cx18_readl(cx, &mb->error);
	mutex_unlock(mb_lock);

	/*
	 * Wait for XPU to perform extra actions for the caller in some cases.
	 * e.g. CX18_CPU_DE_RELEASE_MDL will cause the CPU to send all buffers
	 * back in a burst shortly thereafter
	 */
	if (info->flags & API_SLOW)
		cx18_msleep_timeout(300, 0);

	if (err)
		CX18_DEBUG_API("mailbox error %08x for command %s\n", err,
			       info->name);
	return err ? -EIO : 0;
}

/* Public wrapper around cx18_api_call(); see that function for semantics */
int cx18_api(struct cx18 *cx, u32 cmd, int args, u32 data[])
{
	return cx18_api_call(cx, cmd, args, data);
}

/*
 * Push the driver's current spatial/temporal/median filter settings to the
 * firmware via three CX18_CPU_SET_FILTER_PARAM calls.  Stops at the first
 * failure and returns that error, 0 on success.
 */
static int cx18_set_filter_param(struct cx18_stream *s)
{
	struct cx18 *cx = s->cx;
	u32 mode;
	int ret;

	mode = (cx->filter_mode & 1) ? 2 : (cx->spatial_strength ? 1 : 0);
	ret = cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
			s->handle, 1, mode, cx->spatial_strength);
	mode = (cx->filter_mode & 2) ? 2 : (cx->temporal_strength ? 1 : 0);
	ret = ret ? ret : cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
				    s->handle, 0, mode, cx->temporal_strength);
	ret = ret ? ret : cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
				    s->handle, 2, cx->filter_mode >> 2, 0);
	return ret;
}

/*
 * cx2341x mailbox API adapter: translate generic CX2341X encoder commands
 * into the corresponding cx18 firmware API calls on the MPG stream.
 * Unknown commands are warned about and treated as success (return 0).
 */
int cx18_api_func(void *priv, u32 cmd, int in, int out,
		  u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct cx18 *cx = priv;
	struct cx18_stream *s = &cx->streams[CX18_ENC_STREAM_TYPE_MPG];

	switch (cmd) {
	case CX2341X_ENC_SET_OUTPUT_PORT:
		return 0;
	case CX2341X_ENC_SET_FRAME_RATE:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_IN, 6,
				 s->handle, 0, 0, 0, 0, data[0]);
	case CX2341X_ENC_SET_FRAME_SIZE:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_RESOLUTION, 3,
				 s->handle, data[1], data[0]);
	case CX2341X_ENC_SET_STREAM_TYPE:
		return cx18_vapi(cx, CX18_CPU_SET_STREAM_OUTPUT_TYPE, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_SET_ASPECT_RATIO:
		return cx18_vapi(cx, CX18_CPU_SET_ASPECT_RATIO, 2,
				 s->handle, data[0]);

	case CX2341X_ENC_SET_GOP_PROPERTIES:
		return cx18_vapi(cx, CX18_CPU_SET_GOP_STRUCTURE, 3,
				 s->handle, data[0], data[1]);
	case CX2341X_ENC_SET_GOP_CLOSURE:
		return 0;
	case CX2341X_ENC_SET_AUDIO_PROPERTIES:
		return cx18_vapi(cx, CX18_CPU_SET_AUDIO_PARAMETERS, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_MUTE_AUDIO:
		return cx18_vapi(cx, CX18_CPU_SET_AUDIO_MUTE, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_SET_BIT_RATE:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_RATE, 5,
				 s->handle, data[0], data[1], data[2], data[3]);
	case CX2341X_ENC_MUTE_VIDEO:
		return cx18_vapi(cx, CX18_CPU_SET_VIDEO_MUTE, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_SET_FRAME_DROP_RATE:
		return cx18_vapi(cx, CX18_CPU_SET_SKIP_INPUT_FRAME, 2,
				 s->handle, data[0]);
	case CX2341X_ENC_MISC:
		return cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 4,
				 s->handle, data[0], data[1], data[2]);
	case CX2341X_ENC_SET_DNR_FILTER_MODE:
		cx->filter_mode = (data[0] & 3) | (data[1] << 2);
		return cx18_set_filter_param(s);
	case CX2341X_ENC_SET_DNR_FILTER_PROPS:
		cx->spatial_strength = data[0];
		cx->temporal_strength = data[1];
		return cx18_set_filter_param(s);
	case CX2341X_ENC_SET_SPATIAL_FILTER_TYPE:
		return cx18_vapi(cx, CX18_CPU_SET_SPATIAL_FILTER_TYPE, 3,
				 s->handle, data[0], data[1]);
	case CX2341X_ENC_SET_CORING_LEVELS:
		return cx18_vapi(cx, CX18_CPU_SET_MEDIAN_CORING, 5,
				 s->handle, data[0], data[1], data[2], data[3]);
	}
	CX18_WARN("Unknown cmd %x\n", cmd);
	return 0;
}

/*
 * Variadic API call that also returns the mailbox result arguments to the
 * caller through data[] (caller-supplied, MAX_MB_ARGUMENTS entries).
 * NOTE(review): unlike cx18_vapi(), 'args' is not clamped here — callers
 * must not pass more than MAX_MB_ARGUMENTS arguments.
 */
int cx18_vapi_result(struct cx18 *cx, u32 data[MAX_MB_ARGUMENTS],
		     u32 cmd, int args, ...)
{
	va_list ap;
	int i;

	va_start(ap, args);
	for (i = 0; i < args; i++)
		data[i] = va_arg(ap, u32);
	va_end(ap);
	return cx18_api(cx, cmd, args, data);
}

/*
 * Variadic API call for callers that don't need the result arguments.
 * Clamps the argument count to MAX_MB_ARGUMENTS and tolerates cx == NULL
 * (logs and returns 0).
 */
int cx18_vapi(struct cx18 *cx, u32 cmd, int args, ...)
{
	u32 data[MAX_MB_ARGUMENTS];
	va_list ap;
	int i;

	if (cx == NULL) {
		CX18_ERR("cx == NULL (cmd=%x)\n", cmd);
		return 0;
	}
	if (args > MAX_MB_ARGUMENTS) {
		CX18_ERR("args too big (cmd=%x)\n", cmd);
		args = MAX_MB_ARGUMENTS;
	}
	va_start(ap, args);
	for (i = 0; i < args; i++)
		data[i] = va_arg(ap, u32);
	va_end(ap);
	return cx18_api(cx, cmd, args, data);
}