Lines matching refs: m2m_ctx

65 static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
69 return &m2m_ctx->out_q_ctx;
71 return &m2m_ctx->cap_q_ctx;
77 struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
82 q_ctx = get_queue_ctx(m2m_ctx, type);
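
The two helpers above map a V4L2 buffer type to the context's queues: get_queue_ctx() returns out_q_ctx for OUTPUT buffer types and cap_q_ctx otherwise (lines 69/71), and v4l2_m2m_get_vq() hands back the embedded vb2_queue. A minimal usage sketch, assuming a hypothetical driver context struct my_ctx that stores the m2m_ctx pointer:

	#include <media/v4l2-mem2mem.h>

	/* Hypothetical per-file driver context; only m2m_ctx matters here. */
	struct my_ctx {
		struct v4l2_m2m_ctx *m2m_ctx;
	};

	static struct vb2_queue *my_queue_for_type(struct my_ctx *ctx,
						   enum v4l2_buf_type type)
	{
		/* OUTPUT types resolve to the source queue, everything
		 * else to the capture (destination) queue. */
		return v4l2_m2m_get_vq(ctx->m2m_ctx, type);
	}
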
189 * @m2m_ctx: m2m context assigned to the instance to be checked
202 static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
207 m2m_dev = m2m_ctx->m2m_dev;
208 dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
210 if (!m2m_ctx->out_q_ctx.q.streaming
211 || !m2m_ctx->cap_q_ctx.q.streaming) {
217 if (m2m_ctx->job_flags & TRANS_QUEUED) {
223 spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
224 if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
225 spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
230 if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
231 spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
236 spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
239 && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
245 list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
246 m2m_ctx->job_flags |= TRANS_QUEUED;
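
v4l2_m2m_try_schedule() (lines 202-246) queues a job only when every condition holds: both queues are streaming (210-211), the context is not already queued (217), both ready lists are non-empty (224/230, checked under the rdy_spinlock), and the driver's optional job_ready callback agrees (239). A sketch of such a callback, assuming the hypothetical my_ctx above plus an invented translen field for the number of source buffers one transaction needs:

	/* Consulted at line 239; returning 0 keeps the job off the queue. */
	static int my_job_ready(void *priv)
	{
		struct my_ctx *ctx = priv;

		/* Run only once enough source buffers have accumulated. */
		return v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) >= ctx->translen;
	}
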
266 struct v4l2_m2m_ctx *m2m_ctx)
271 if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
287 v4l2_m2m_try_schedule(m2m_ctx);
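
v4l2_m2m_job_finish() (lines 266-287) refuses to finish anything but the currently running context (271), then clears it and calls v4l2_m2m_try_schedule() (287) so the next queued context can run. A driver calls it once per completed transaction, typically from its interrupt handler; a sketch, where my_dev and its curr_ctx bookkeeping are assumptions:

	/* Only the v4l2_m2m_*() and vb2_buffer_done() calls are framework
	 * API; my_dev and how it tracks the running context are invented. */
	static irqreturn_t my_irq_handler(int irq, void *priv)
	{
		struct my_dev *dev = priv;
		struct my_ctx *ctx = dev->curr_ctx;
		struct vb2_buffer *src, *dst;

		src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
		dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
		vb2_buffer_done(src, VB2_BUF_STATE_DONE);
		vb2_buffer_done(dst, VB2_BUF_STATE_DONE);

		/* Wakes ctx->finished waiters and schedules the next job. */
		v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
		return IRQ_HANDLED;
	}
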
295 int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
300 vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
310 int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
317 vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
339 int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
345 vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
348 v4l2_m2m_try_schedule(m2m_ctx);
358 int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
363 vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
371 int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
377 vq = v4l2_m2m_get_vq(m2m_ctx, type);
380 v4l2_m2m_try_schedule(m2m_ctx);
389 int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
394 vq = v4l2_m2m_get_vq(m2m_ctx, type);
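
The ioctl helpers at lines 295-394 are thin wrappers: each resolves the right queue with v4l2_m2m_get_vq() and forwards to vb2, and v4l2_m2m_qbuf()/v4l2_m2m_streamon() additionally kick v4l2_m2m_try_schedule() (348, 380). Drivers usually wire them straight into their v4l2_ioctl_ops; a sketch, with file2ctx() as an invented helper for recovering the context:

	/* Hypothetical: assumes the open() handler stored the context in
	 * file->private_data. */
	static inline struct my_ctx *file2ctx(struct file *file)
	{
		return file->private_data;
	}

	static int vidioc_reqbufs(struct file *file, void *priv,
				  struct v4l2_requestbuffers *reqbufs)
	{
		return v4l2_m2m_reqbufs(file, file2ctx(file)->m2m_ctx, reqbufs);
	}

	static int vidioc_qbuf(struct file *file, void *priv,
			       struct v4l2_buffer *buf)
	{
		/* Queues the buffer, then tries to schedule (line 348). */
		return v4l2_m2m_qbuf(file, file2ctx(file)->m2m_ctx, buf);
	}

	static int vidioc_streamon(struct file *file, void *priv,
				   enum v4l2_buf_type type)
	{
		/* Scheduling can only succeed once both queues stream. */
		return v4l2_m2m_streamon(file, file2ctx(file)->m2m_ctx, type);
	}
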
407 unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
415 src_q = v4l2_m2m_get_src_vq(m2m_ctx);
416 dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
429 if (m2m_ctx->m2m_dev->m2m_ops->unlock)
430 m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);
435 if (m2m_ctx->m2m_dev->m2m_ops->lock)
436 m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);
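
v4l2_m2m_poll() (lines 407-436) polls both the source and destination queue; if the driver provides them, the m2m_ops lock/unlock callbacks let it drop the driver's lock while waiting (429-436) so a running job can still complete. A sketch of the ops table the framework consults, where every my_* handler is an assumption:

	static struct v4l2_m2m_ops my_m2m_ops = {
		.device_run	= my_device_run,	/* start the HW job */
		.job_ready	= my_job_ready,		/* see sketch above */
		.job_abort	= my_job_abort,		/* cancel a running job */
		.lock		= my_lock,		/* retaken at line 436 */
		.unlock		= my_unlock,		/* dropped at line 430 */
	};
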
471 int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
478 vq = v4l2_m2m_get_src_vq(m2m_ctx);
480 vq = v4l2_m2m_get_dst_vq(m2m_ctx);
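
v4l2_m2m_mmap() (lines 471-480) dispatches on the mmap offset: offsets below DST_QUEUE_OFF_BASE map source (output) buffers, higher ones map destination (capture) buffers, so both queues share a single mapping space. The driver-side wrapper is a one-liner, reusing the hypothetical file2ctx() from above:

	static int my_mmap(struct file *file, struct vm_area_struct *vma)
	{
		/* Picks the source queue (line 478) or, for offsets past
		 * DST_QUEUE_OFF_BASE, the destination queue (line 480). */
		return v4l2_m2m_mmap(file, file2ctx(file)->m2m_ctx, vma);
	}
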
540 struct v4l2_m2m_ctx *m2m_ctx;
544 m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
545 if (!m2m_ctx)
548 m2m_ctx->priv = drv_priv;
549 m2m_ctx->m2m_dev = m2m_dev;
550 init_waitqueue_head(&m2m_ctx->finished);
552 out_q_ctx = &m2m_ctx->out_q_ctx;
553 cap_q_ctx = &m2m_ctx->cap_q_ctx;
560 INIT_LIST_HEAD(&m2m_ctx->queue);
567 return m2m_ctx;
569 kfree(m2m_ctx);
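
v4l2_m2m_ctx_init() (lines 540-569) allocates the context, stores the driver's private pointer, initializes the wait queue and job list, and lets a driver-supplied callback set up both vb2 queues. It is normally called from the open() handler; a sketch, where my_dev, my_queue_init() and the surrounding bookkeeping are assumptions:

	static int my_open(struct file *file)
	{
		struct my_dev *dev = video_drvdata(file);
		struct my_ctx *ctx;

		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;

		/* my_queue_init() must fill in the source and destination
		 * vb2_queue structs the framework hands to it. */
		ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
						 my_queue_init);
		if (IS_ERR(ctx->m2m_ctx)) {
			int ret = PTR_ERR(ctx->m2m_ctx);

			kfree(ctx);
			return ret;
		}

		file->private_data = ctx;
		return 0;
	}
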
579 void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
584 m2m_dev = m2m_ctx->m2m_dev;
587 if (m2m_ctx->job_flags & TRANS_RUNNING) {
589 m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
590 dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
591 wait_event(m2m_ctx->finished, !(m2m_ctx->job_flags & TRANS_RUNNING));
592 } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
593 list_del(&m2m_ctx->queue);
594 m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
596 dprintk("m2m_ctx: %p had been on queue and was removed\n",
597 m2m_ctx);
603 vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
604 vb2_queue_release(&m2m_ctx->out_q_ctx.q);
606 kfree(m2m_ctx);
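
v4l2_m2m_ctx_release() (lines 579-606) aborts and waits out a running job (587-591), or unlinks a merely queued one (592-597), before releasing both vb2 queues and freeing the context, so it is safe to call from the file release handler:

	static int my_release(struct file *file)
	{
		struct my_ctx *ctx = file2ctx(file);

		/* Blocks until any in-flight job signals ctx->finished. */
		v4l2_m2m_ctx_release(ctx->m2m_ctx);
		kfree(ctx);
		return 0;
	}
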
615 void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
621 q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type);
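
v4l2_m2m_buf_queue() (lines 615-621) looks up the queue context for the buffer's type and appends the buffer to its rdy_queue, which is exactly the list v4l2_m2m_try_schedule() inspects. Drivers call it from their vb2 buf_queue op; a sketch assuming the queue's drv_priv was set to the driver context:

	static void my_buf_queue(struct vb2_buffer *vb)
	{
		struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

		/* Appends vb to the matching rdy_queue (line 621). */
		v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
	}
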