/* cx18-mailbox.c, revision bca11a5721917d6d5874571813673a2669ffec4b */
1/* 2 * cx18 mailbox functions 3 * 4 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, write to the Free Software 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 19 * 02111-1307 USA 20 */ 21 22#include <stdarg.h> 23 24#include "cx18-driver.h" 25#include "cx18-io.h" 26#include "cx18-scb.h" 27#include "cx18-irq.h" 28#include "cx18-mailbox.h" 29#include "cx18-queue.h" 30#include "cx18-streams.h" 31 32static const char *rpu_str[] = { "APU", "CPU", "EPU", "HPU" }; 33 34#define API_FAST (1 << 2) /* Short timeout */ 35#define API_SLOW (1 << 3) /* Additional 300ms timeout */ 36 37struct cx18_api_info { 38 u32 cmd; 39 u8 flags; /* Flags, see above */ 40 u8 rpu; /* Processing unit */ 41 const char *name; /* The name of the command */ 42}; 43 44#define API_ENTRY(rpu, x, f) { (x), (f), (rpu), #x } 45 46static const struct cx18_api_info api_info[] = { 47 /* MPEG encoder API */ 48 API_ENTRY(CPU, CX18_CPU_SET_CHANNEL_TYPE, 0), 49 API_ENTRY(CPU, CX18_EPU_DEBUG, 0), 50 API_ENTRY(CPU, CX18_CREATE_TASK, 0), 51 API_ENTRY(CPU, CX18_DESTROY_TASK, 0), 52 API_ENTRY(CPU, CX18_CPU_CAPTURE_START, API_SLOW), 53 API_ENTRY(CPU, CX18_CPU_CAPTURE_STOP, API_SLOW), 54 API_ENTRY(CPU, CX18_CPU_CAPTURE_PAUSE, 0), 55 API_ENTRY(CPU, CX18_CPU_CAPTURE_RESUME, 0), 56 API_ENTRY(CPU, CX18_CPU_SET_CHANNEL_TYPE, 0), 57 API_ENTRY(CPU, CX18_CPU_SET_STREAM_OUTPUT_TYPE, 0), 
58 API_ENTRY(CPU, CX18_CPU_SET_VIDEO_IN, 0), 59 API_ENTRY(CPU, CX18_CPU_SET_VIDEO_RATE, 0), 60 API_ENTRY(CPU, CX18_CPU_SET_VIDEO_RESOLUTION, 0), 61 API_ENTRY(CPU, CX18_CPU_SET_FILTER_PARAM, 0), 62 API_ENTRY(CPU, CX18_CPU_SET_SPATIAL_FILTER_TYPE, 0), 63 API_ENTRY(CPU, CX18_CPU_SET_MEDIAN_CORING, 0), 64 API_ENTRY(CPU, CX18_CPU_SET_INDEXTABLE, 0), 65 API_ENTRY(CPU, CX18_CPU_SET_AUDIO_PARAMETERS, 0), 66 API_ENTRY(CPU, CX18_CPU_SET_VIDEO_MUTE, 0), 67 API_ENTRY(CPU, CX18_CPU_SET_AUDIO_MUTE, 0), 68 API_ENTRY(CPU, CX18_CPU_SET_MISC_PARAMETERS, 0), 69 API_ENTRY(CPU, CX18_CPU_SET_RAW_VBI_PARAM, API_SLOW), 70 API_ENTRY(CPU, CX18_CPU_SET_CAPTURE_LINE_NO, 0), 71 API_ENTRY(CPU, CX18_CPU_SET_COPYRIGHT, 0), 72 API_ENTRY(CPU, CX18_CPU_SET_AUDIO_PID, 0), 73 API_ENTRY(CPU, CX18_CPU_SET_VIDEO_PID, 0), 74 API_ENTRY(CPU, CX18_CPU_SET_VER_CROP_LINE, 0), 75 API_ENTRY(CPU, CX18_CPU_SET_GOP_STRUCTURE, 0), 76 API_ENTRY(CPU, CX18_CPU_SET_SCENE_CHANGE_DETECTION, 0), 77 API_ENTRY(CPU, CX18_CPU_SET_ASPECT_RATIO, 0), 78 API_ENTRY(CPU, CX18_CPU_SET_SKIP_INPUT_FRAME, 0), 79 API_ENTRY(CPU, CX18_CPU_SET_SLICED_VBI_PARAM, 0), 80 API_ENTRY(CPU, CX18_CPU_SET_USERDATA_PLACE_HOLDER, 0), 81 API_ENTRY(CPU, CX18_CPU_GET_ENC_PTS, 0), 82 API_ENTRY(CPU, CX18_CPU_DE_SET_MDL_ACK, 0), 83 API_ENTRY(CPU, CX18_CPU_DE_SET_MDL, API_FAST), 84 API_ENTRY(CPU, CX18_APU_RESETAI, API_FAST), 85 API_ENTRY(CPU, CX18_CPU_DE_RELEASE_MDL, API_SLOW), 86 API_ENTRY(0, 0, 0), 87}; 88 89static const struct cx18_api_info *find_api_info(u32 cmd) 90{ 91 int i; 92 93 for (i = 0; api_info[i].cmd; i++) 94 if (api_info[i].cmd == cmd) 95 return &api_info[i]; 96 return NULL; 97} 98 99static void dump_mb(struct cx18 *cx, struct cx18_mailbox *mb, char *name) 100{ 101 char argstr[MAX_MB_ARGUMENTS*11+1]; 102 char *p; 103 int i; 104 105 if (!(cx18_debug & CX18_DBGFLG_API)) 106 return; 107 108 for (i = 0, p = argstr; i < MAX_MB_ARGUMENTS; i++, p += 11) { 109 /* kernel snprintf() appends '\0' always */ 110 snprintf(p, 12, " %#010x", mb->args[i]); 111 
} 112 CX18_DEBUG_API("%s: req %#010x ack %#010x cmd %#010x err %#010x args%s" 113 "\n", name, mb->request, mb->ack, mb->cmd, mb->error, argstr); 114} 115 116 117/* 118 * Functions that run in a work_queue work handling context 119 */ 120 121static void epu_dma_done(struct cx18 *cx, struct cx18_epu_work_order *order) 122{ 123 u32 handle, mdl_ack_count, id; 124 struct cx18_mailbox *mb; 125 struct cx18_mdl_ack *mdl_ack; 126 struct cx18_stream *s; 127 struct cx18_buffer *buf; 128 int i; 129 130 mb = &order->mb; 131 handle = mb->args[0]; 132 s = cx18_handle_to_stream(cx, handle); 133 134 if (s == NULL) { 135 CX18_WARN("Got DMA done notification for unknown/inactive" 136 " handle %d, %s mailbox seq no %d\n", handle, 137 (order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) ? 138 "stale" : "good", mb->request); 139 return; 140 } 141 142 mdl_ack_count = mb->args[2]; 143 mdl_ack = order->mdl_ack; 144 for (i = 0; i < mdl_ack_count; i++, mdl_ack++) { 145 id = mdl_ack->id; 146 /* 147 * Simple integrity check for processing a stale (and possibly 148 * inconsistent mailbox): make sure the buffer id is in the 149 * valid range for the stream. 150 * 151 * We go through the trouble of dealing with stale mailboxes 152 * because most of the time, the mailbox data is still valid and 153 * unchanged (and in practice the firmware ping-pongs the 154 * two mdl_ack buffers so mdl_acks are not stale). 155 * 156 * There are occasions when we get a half changed mailbox, 157 * which this check catches for a handle & id mismatch. If the 158 * handle and id do correspond, the worst case is that we 159 * completely lost the old buffer, but pick up the new buffer 160 * early (but the new mdl_ack is guaranteed to be good in this 161 * case as the firmware wouldn't point us to a new mdl_ack until 162 * it's filled in). 163 * 164 * cx18_queue_get buf() will detect the lost buffers 165 * and put them back in rotation eventually. 
166 */ 167 if ((order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) && 168 !(id >= s->mdl_offset && 169 id < (s->mdl_offset + s->buffers))) { 170 CX18_WARN("Fell behind! Ignoring stale mailbox with " 171 " inconsistent data. Lost buffer for mailbox " 172 "seq no %d\n", mb->request); 173 break; 174 } 175 buf = cx18_queue_get_buf(s, id, mdl_ack->data_used); 176 CX18_DEBUG_HI_DMA("DMA DONE for %s (buffer %d)\n", s->name, id); 177 if (buf == NULL) { 178 CX18_WARN("Could not find buf %d for stream %s\n", 179 id, s->name); 180 continue; 181 } 182 183 cx18_buf_sync_for_cpu(s, buf); 184 if (s->type == CX18_ENC_STREAM_TYPE_TS && s->dvb.enabled) { 185 CX18_DEBUG_HI_DMA("TS recv bytesused = %d\n", 186 buf->bytesused); 187 188 dvb_dmx_swfilter(&s->dvb.demux, buf->buf, 189 buf->bytesused); 190 191 cx18_buf_sync_for_device(s, buf); 192 cx18_enqueue(s, buf, &s->q_free); 193 194 if (s->handle != CX18_INVALID_TASK_HANDLE && 195 test_bit(CX18_F_S_STREAMING, &s->s_flags)) 196 cx18_vapi(cx, 197 CX18_CPU_DE_SET_MDL, 5, s->handle, 198 (void __iomem *) 199 &cx->scb->cpu_mdl[buf->id] - cx->enc_mem, 200 1, buf->id, s->buf_size); 201 } else 202 set_bit(CX18_F_B_NEED_BUF_SWAP, &buf->b_flags); 203 } 204 wake_up(&cx->dma_waitq); 205 if (s->id != -1) 206 wake_up(&s->waitq); 207} 208 209static void epu_debug(struct cx18 *cx, struct cx18_epu_work_order *order) 210{ 211 char *p; 212 char *str = order->str; 213 214 CX18_DEBUG_INFO("%x %s\n", order->mb.args[0], str); 215 p = strchr(str, '.'); 216 if (!test_bit(CX18_F_I_LOADED_FW, &cx->i_flags) && p && p > str) 217 CX18_INFO("FW version: %s\n", p - 1); 218} 219 220static void epu_cmd(struct cx18 *cx, struct cx18_epu_work_order *order) 221{ 222 switch (order->rpu) { 223 case CPU: 224 { 225 switch (order->mb.cmd) { 226 case CX18_EPU_DMA_DONE: 227 epu_dma_done(cx, order); 228 break; 229 case CX18_EPU_DEBUG: 230 epu_debug(cx, order); 231 break; 232 default: 233 CX18_WARN("Unknown CPU to EPU mailbox command %#0x\n", 234 order->mb.cmd); 235 break; 236 } 237 
break; 238 } 239 case APU: 240 CX18_WARN("Unknown APU to EPU mailbox command %#0x\n", 241 order->mb.cmd); 242 break; 243 default: 244 break; 245 } 246} 247 248static 249void free_epu_work_order(struct cx18 *cx, struct cx18_epu_work_order *order) 250{ 251 atomic_set(&order->pending, 0); 252} 253 254void cx18_epu_work_handler(struct work_struct *work) 255{ 256 struct cx18_epu_work_order *order = 257 container_of(work, struct cx18_epu_work_order, work); 258 struct cx18 *cx = order->cx; 259 epu_cmd(cx, order); 260 free_epu_work_order(cx, order); 261} 262 263 264/* 265 * Functions that run in an interrupt handling context 266 */ 267 268static void mb_ack_irq(struct cx18 *cx, struct cx18_epu_work_order *order) 269{ 270 struct cx18_mailbox __iomem *ack_mb; 271 u32 ack_irq, req; 272 273 switch (order->rpu) { 274 case APU: 275 ack_irq = IRQ_EPU_TO_APU_ACK; 276 ack_mb = &cx->scb->apu2epu_mb; 277 break; 278 case CPU: 279 ack_irq = IRQ_EPU_TO_CPU_ACK; 280 ack_mb = &cx->scb->cpu2epu_mb; 281 break; 282 default: 283 CX18_WARN("Unhandled RPU (%d) for command %x ack\n", 284 order->rpu, order->mb.cmd); 285 return; 286 } 287 288 req = order->mb.request; 289 /* Don't ack if the RPU has gotten impatient and timed us out */ 290 if (req != cx18_readl(cx, &ack_mb->request) || 291 req == cx18_readl(cx, &ack_mb->ack)) { 292 CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our " 293 "incoming %s to EPU mailbox (sequence no. 
%u) " 294 "while processing\n", 295 rpu_str[order->rpu], rpu_str[order->rpu], req); 296 order->flags |= CX18_F_EWO_MB_STALE_WHILE_PROC; 297 return; 298 } 299 cx18_writel(cx, req, &ack_mb->ack); 300 cx18_write_reg_expect(cx, ack_irq, SW2_INT_SET, ack_irq, ack_irq); 301 return; 302} 303 304static int epu_dma_done_irq(struct cx18 *cx, struct cx18_epu_work_order *order) 305{ 306 u32 handle, mdl_ack_offset, mdl_ack_count; 307 struct cx18_mailbox *mb; 308 309 mb = &order->mb; 310 handle = mb->args[0]; 311 mdl_ack_offset = mb->args[1]; 312 mdl_ack_count = mb->args[2]; 313 314 if (handle == CX18_INVALID_TASK_HANDLE || 315 mdl_ack_count == 0 || mdl_ack_count > CX18_MAX_MDL_ACKS) { 316 if ((order->flags & CX18_F_EWO_MB_STALE) == 0) 317 mb_ack_irq(cx, order); 318 return -1; 319 } 320 321 cx18_memcpy_fromio(cx, order->mdl_ack, cx->enc_mem + mdl_ack_offset, 322 sizeof(struct cx18_mdl_ack) * mdl_ack_count); 323 324 if ((order->flags & CX18_F_EWO_MB_STALE) == 0) 325 mb_ack_irq(cx, order); 326 return 1; 327} 328 329static 330int epu_debug_irq(struct cx18 *cx, struct cx18_epu_work_order *order) 331{ 332 u32 str_offset; 333 char *str = order->str; 334 335 str[0] = '\0'; 336 str_offset = order->mb.args[1]; 337 if (str_offset) { 338 cx18_setup_page(cx, str_offset); 339 cx18_memcpy_fromio(cx, str, cx->enc_mem + str_offset, 252); 340 str[252] = '\0'; 341 cx18_setup_page(cx, SCB_OFFSET); 342 } 343 344 if ((order->flags & CX18_F_EWO_MB_STALE) == 0) 345 mb_ack_irq(cx, order); 346 347 return str_offset ? 
1 : 0; 348} 349 350static inline 351int epu_cmd_irq(struct cx18 *cx, struct cx18_epu_work_order *order) 352{ 353 int ret = -1; 354 355 switch (order->rpu) { 356 case CPU: 357 { 358 switch (order->mb.cmd) { 359 case CX18_EPU_DMA_DONE: 360 ret = epu_dma_done_irq(cx, order); 361 break; 362 case CX18_EPU_DEBUG: 363 ret = epu_debug_irq(cx, order); 364 break; 365 default: 366 CX18_WARN("Unknown CPU to EPU mailbox command %#0x\n", 367 order->mb.cmd); 368 break; 369 } 370 break; 371 } 372 case APU: 373 CX18_WARN("Unknown APU to EPU mailbox command %#0x\n", 374 order->mb.cmd); 375 break; 376 default: 377 break; 378 } 379 return ret; 380} 381 382static inline 383struct cx18_epu_work_order *alloc_epu_work_order_irq(struct cx18 *cx) 384{ 385 int i; 386 struct cx18_epu_work_order *order = NULL; 387 388 for (i = 0; i < CX18_MAX_EPU_WORK_ORDERS; i++) { 389 /* 390 * We only need "pending" atomic to inspect its contents, 391 * and need not do a check and set because: 392 * 1. Any work handler thread only clears "pending" and only 393 * on one, particular work order at a time, per handler thread. 394 * 2. "pending" is only set here, and we're serialized because 395 * we're called in an IRQ handler context. 
396 */ 397 if (atomic_read(&cx->epu_work_order[i].pending) == 0) { 398 order = &cx->epu_work_order[i]; 399 atomic_set(&order->pending, 1); 400 break; 401 } 402 } 403 return order; 404} 405 406void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu) 407{ 408 struct cx18_mailbox __iomem *mb; 409 struct cx18_mailbox *order_mb; 410 struct cx18_epu_work_order *order; 411 int submit; 412 413 switch (rpu) { 414 case CPU: 415 mb = &cx->scb->cpu2epu_mb; 416 break; 417 case APU: 418 mb = &cx->scb->apu2epu_mb; 419 break; 420 default: 421 return; 422 } 423 424 order = alloc_epu_work_order_irq(cx); 425 if (order == NULL) { 426 CX18_WARN("Unable to find blank work order form to schedule " 427 "incoming mailbox command processing\n"); 428 return; 429 } 430 431 order->flags = 0; 432 order->rpu = rpu; 433 order_mb = &order->mb; 434 435 /* mb->cmd and mb->args[0] through mb->args[2] */ 436 cx18_memcpy_fromio(cx, &order_mb->cmd, &mb->cmd, 4 * sizeof(u32)); 437 /* mb->request and mb->ack. N.B. we want to read mb->ack last */ 438 cx18_memcpy_fromio(cx, &order_mb->request, &mb->request, 439 2 * sizeof(u32)); 440 441 if (order_mb->request == order_mb->ack) { 442 CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our " 443 "incoming %s to EPU mailbox (sequence no. 
%u)" 444 "\n", 445 rpu_str[rpu], rpu_str[rpu], order_mb->request); 446 dump_mb(cx, order_mb, "incoming"); 447 order->flags = CX18_F_EWO_MB_STALE_UPON_RECEIPT; 448 } 449 450 /* 451 * Individual EPU command processing is responsible for ack-ing 452 * a non-stale mailbox as soon as possible 453 */ 454 submit = epu_cmd_irq(cx, order); 455 if (submit > 0) { 456 queue_work(cx18_work_queue, &order->work); 457 } 458} 459 460 461/* 462 * Functions called from a non-interrupt, non work_queue context 463 */ 464 465static void cx18_api_log_ack_delay(struct cx18 *cx, int msecs) 466{ 467 if (msecs > CX18_MAX_MB_ACK_DELAY) 468 msecs = CX18_MAX_MB_ACK_DELAY; 469 atomic_inc(&cx->mbox_stats.mb_ack_delay[msecs]); 470} 471 472static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[]) 473{ 474 const struct cx18_api_info *info = find_api_info(cmd); 475 u32 state, irq, req, ack, err; 476 struct cx18_mailbox __iomem *mb; 477 u32 __iomem *xpu_state; 478 wait_queue_head_t *waitq; 479 struct mutex *mb_lock; 480 long int timeout, ret; 481 int i; 482 483 if (info == NULL) { 484 CX18_WARN("unknown cmd %x\n", cmd); 485 return -EINVAL; 486 } 487 488 if (cmd == CX18_CPU_DE_SET_MDL) 489 CX18_DEBUG_HI_API("%s\n", info->name); 490 else 491 CX18_DEBUG_API("%s\n", info->name); 492 493 switch (info->rpu) { 494 case APU: 495 waitq = &cx->mb_apu_waitq; 496 mb_lock = &cx->epu2apu_mb_lock; 497 irq = IRQ_EPU_TO_APU; 498 mb = &cx->scb->epu2apu_mb; 499 xpu_state = &cx->scb->apu_state; 500 break; 501 case CPU: 502 waitq = &cx->mb_cpu_waitq; 503 mb_lock = &cx->epu2cpu_mb_lock; 504 irq = IRQ_EPU_TO_CPU; 505 mb = &cx->scb->epu2cpu_mb; 506 xpu_state = &cx->scb->cpu_state; 507 break; 508 default: 509 CX18_WARN("Unknown RPU (%d) for API call\n", info->rpu); 510 return -EINVAL; 511 } 512 513 mutex_lock(mb_lock); 514 /* 515 * Wait for an in-use mailbox to complete 516 * 517 * If the XPU is responding with Ack's, the mailbox shouldn't be in 518 * a busy state, since we serialize access to it on our end. 
519 * 520 * If the wait for ack after sending a previous command was interrupted 521 * by a signal, we may get here and find a busy mailbox. After waiting, 522 * mark it "not busy" from our end, if the XPU hasn't ack'ed it still. 523 */ 524 state = cx18_readl(cx, xpu_state); 525 req = cx18_readl(cx, &mb->request); 526 timeout = msecs_to_jiffies(20); /* 1 field at 50 Hz vertical refresh */ 527 ret = wait_event_timeout(*waitq, 528 (ack = cx18_readl(cx, &mb->ack)) == req, 529 timeout); 530 if (req != ack) { 531 /* waited long enough, make the mbox "not busy" from our end */ 532 cx18_writel(cx, req, &mb->ack); 533 CX18_ERR("mbox was found stuck busy when setting up for %s; " 534 "clearing busy and trying to proceed\n", info->name); 535 } else if (ret != timeout) 536 CX18_DEBUG_API("waited %u usecs for busy mbox to be acked\n", 537 jiffies_to_usecs(timeout-ret)); 538 539 /* Build the outgoing mailbox */ 540 req = ((req & 0xfffffffe) == 0xfffffffe) ? 1 : req + 1; 541 542 cx18_writel(cx, cmd, &mb->cmd); 543 for (i = 0; i < args; i++) 544 cx18_writel(cx, data[i], &mb->args[i]); 545 cx18_writel(cx, 0, &mb->error); 546 cx18_writel(cx, req, &mb->request); 547 cx18_writel(cx, req - 1, &mb->ack); /* ensure ack & req are distinct */ 548 549 /* 550 * Notify the XPU and wait for it to send an Ack back 551 * 21 ms = ~ 0.5 frames at a frame rate of 24 fps 552 * 42 ms = ~ 1 frame at a frame rate of 24 fps 553 */ 554 timeout = msecs_to_jiffies((info->flags & API_FAST) ? 
21 : 42); 555 556 CX18_DEBUG_HI_IRQ("sending interrupt SW1: %x to send %s\n", 557 irq, info->name); 558 cx18_write_reg_expect(cx, irq, SW1_INT_SET, irq, irq); 559 560 ret = wait_event_timeout( 561 *waitq, 562 cx18_readl(cx, &mb->ack) == cx18_readl(cx, &mb->request), 563 timeout); 564 if (ret == 0) { 565 /* Timed out */ 566 mutex_unlock(mb_lock); 567 i = jiffies_to_msecs(timeout); 568 cx18_api_log_ack_delay(cx, i); 569 CX18_WARN("sending %s timed out waiting %d msecs for RPU " 570 "acknowledgement\n", info->name, i); 571 return -EINVAL; 572 } else if (ret < 0) { 573 /* Interrupted */ 574 mutex_unlock(mb_lock); 575 CX18_WARN("sending %s was interrupted waiting for RPU" 576 "acknowledgement\n", info->name); 577 return -EINTR; 578 } 579 580 i = jiffies_to_msecs(timeout-ret); 581 cx18_api_log_ack_delay(cx, i); 582 if (ret != timeout) 583 CX18_DEBUG_HI_API("waited %u msecs for %s to be acked\n", 584 i, info->name); 585 586 /* Collect data returned by the XPU */ 587 for (i = 0; i < MAX_MB_ARGUMENTS; i++) 588 data[i] = cx18_readl(cx, &mb->args[i]); 589 err = cx18_readl(cx, &mb->error); 590 mutex_unlock(mb_lock); 591 592 /* 593 * Wait for XPU to perform extra actions for the caller in some cases. 594 * e.g. CX18_CPU_DE_RELEASE_MDL will cause the CPU to send all buffers 595 * back in a burst shortly thereafter 596 */ 597 if (info->flags & API_SLOW) 598 cx18_msleep_timeout(300, 0); 599 600 if (err) 601 CX18_DEBUG_API("mailbox error %08x for command %s\n", err, 602 info->name); 603 return err ? -EIO : 0; 604} 605 606int cx18_api(struct cx18 *cx, u32 cmd, int args, u32 data[]) 607{ 608 return cx18_api_call(cx, cmd, args, data); 609} 610 611static int cx18_set_filter_param(struct cx18_stream *s) 612{ 613 struct cx18 *cx = s->cx; 614 u32 mode; 615 int ret; 616 617 mode = (cx->filter_mode & 1) ? 2 : (cx->spatial_strength ? 1 : 0); 618 ret = cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4, 619 s->handle, 1, mode, cx->spatial_strength); 620 mode = (cx->filter_mode & 2) ? 
2 : (cx->temporal_strength ? 1 : 0); 621 ret = ret ? ret : cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4, 622 s->handle, 0, mode, cx->temporal_strength); 623 ret = ret ? ret : cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4, 624 s->handle, 2, cx->filter_mode >> 2, 0); 625 return ret; 626} 627 628int cx18_api_func(void *priv, u32 cmd, int in, int out, 629 u32 data[CX2341X_MBOX_MAX_DATA]) 630{ 631 struct cx18 *cx = priv; 632 struct cx18_stream *s = &cx->streams[CX18_ENC_STREAM_TYPE_MPG]; 633 634 switch (cmd) { 635 case CX2341X_ENC_SET_OUTPUT_PORT: 636 return 0; 637 case CX2341X_ENC_SET_FRAME_RATE: 638 return cx18_vapi(cx, CX18_CPU_SET_VIDEO_IN, 6, 639 s->handle, 0, 0, 0, 0, data[0]); 640 case CX2341X_ENC_SET_FRAME_SIZE: 641 return cx18_vapi(cx, CX18_CPU_SET_VIDEO_RESOLUTION, 3, 642 s->handle, data[1], data[0]); 643 case CX2341X_ENC_SET_STREAM_TYPE: 644 return cx18_vapi(cx, CX18_CPU_SET_STREAM_OUTPUT_TYPE, 2, 645 s->handle, data[0]); 646 case CX2341X_ENC_SET_ASPECT_RATIO: 647 return cx18_vapi(cx, CX18_CPU_SET_ASPECT_RATIO, 2, 648 s->handle, data[0]); 649 650 case CX2341X_ENC_SET_GOP_PROPERTIES: 651 return cx18_vapi(cx, CX18_CPU_SET_GOP_STRUCTURE, 3, 652 s->handle, data[0], data[1]); 653 case CX2341X_ENC_SET_GOP_CLOSURE: 654 return 0; 655 case CX2341X_ENC_SET_AUDIO_PROPERTIES: 656 return cx18_vapi(cx, CX18_CPU_SET_AUDIO_PARAMETERS, 2, 657 s->handle, data[0]); 658 case CX2341X_ENC_MUTE_AUDIO: 659 return cx18_vapi(cx, CX18_CPU_SET_AUDIO_MUTE, 2, 660 s->handle, data[0]); 661 case CX2341X_ENC_SET_BIT_RATE: 662 return cx18_vapi(cx, CX18_CPU_SET_VIDEO_RATE, 5, 663 s->handle, data[0], data[1], data[2], data[3]); 664 case CX2341X_ENC_MUTE_VIDEO: 665 return cx18_vapi(cx, CX18_CPU_SET_VIDEO_MUTE, 2, 666 s->handle, data[0]); 667 case CX2341X_ENC_SET_FRAME_DROP_RATE: 668 return cx18_vapi(cx, CX18_CPU_SET_SKIP_INPUT_FRAME, 2, 669 s->handle, data[0]); 670 case CX2341X_ENC_MISC: 671 return cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 4, 672 s->handle, data[0], data[1], data[2]); 673 case 
CX2341X_ENC_SET_DNR_FILTER_MODE: 674 cx->filter_mode = (data[0] & 3) | (data[1] << 2); 675 return cx18_set_filter_param(s); 676 case CX2341X_ENC_SET_DNR_FILTER_PROPS: 677 cx->spatial_strength = data[0]; 678 cx->temporal_strength = data[1]; 679 return cx18_set_filter_param(s); 680 case CX2341X_ENC_SET_SPATIAL_FILTER_TYPE: 681 return cx18_vapi(cx, CX18_CPU_SET_SPATIAL_FILTER_TYPE, 3, 682 s->handle, data[0], data[1]); 683 case CX2341X_ENC_SET_CORING_LEVELS: 684 return cx18_vapi(cx, CX18_CPU_SET_MEDIAN_CORING, 5, 685 s->handle, data[0], data[1], data[2], data[3]); 686 } 687 CX18_WARN("Unknown cmd %x\n", cmd); 688 return 0; 689} 690 691int cx18_vapi_result(struct cx18 *cx, u32 data[MAX_MB_ARGUMENTS], 692 u32 cmd, int args, ...) 693{ 694 va_list ap; 695 int i; 696 697 va_start(ap, args); 698 for (i = 0; i < args; i++) 699 data[i] = va_arg(ap, u32); 700 va_end(ap); 701 return cx18_api(cx, cmd, args, data); 702} 703 704int cx18_vapi(struct cx18 *cx, u32 cmd, int args, ...) 705{ 706 u32 data[MAX_MB_ARGUMENTS]; 707 va_list ap; 708 int i; 709 710 if (cx == NULL) { 711 CX18_ERR("cx == NULL (cmd=%x)\n", cmd); 712 return 0; 713 } 714 if (args > MAX_MB_ARGUMENTS) { 715 CX18_ERR("args too big (cmd=%x)\n", cmd); 716 args = MAX_MB_ARGUMENTS; 717 } 718 va_start(ap, args); 719 for (i = 0; i < args; i++) 720 data[i] = va_arg(ap, u32); 721 va_end(ap); 722 return cx18_api(cx, cmd, args, data); 723} 724