s5p_mfc.c revision 65fccab560922d8a1a1e7d3c9711c309126d636f
1/*
2 * Samsung S5P Multi Format Codec v 5.1
3 *
4 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
5 * Kamil Debski, <k.debski@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13#include <linux/clk.h>
14#include <linux/delay.h>
15#include <linux/interrupt.h>
16#include <linux/io.h>
17#include <linux/module.h>
18#include <linux/platform_device.h>
19#include <linux/sched.h>
20#include <linux/slab.h>
21#include <linux/videodev2.h>
22#include <media/v4l2-event.h>
23#include <linux/workqueue.h>
24#include <linux/of.h>
25#include <media/videobuf2-core.h>
26#include "s5p_mfc_common.h"
27#include "s5p_mfc_ctrl.h"
28#include "s5p_mfc_debug.h"
29#include "s5p_mfc_dec.h"
30#include "s5p_mfc_enc.h"
31#include "s5p_mfc_intr.h"
32#include "s5p_mfc_opr.h"
33#include "s5p_mfc_cmd.h"
34#include "s5p_mfc_pm.h"
35
36#define S5P_MFC_NAME		"s5p-mfc"
37#define S5P_MFC_DEC_NAME	"s5p-mfc-dec"
38#define S5P_MFC_ENC_NAME	"s5p-mfc-enc"
39
/* Debug verbosity consumed by mfc_debug(); runtime-writable via sysfs. */
int debug;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level - higher value produces more verbose messages");
43
44/* Helper functions for interrupt processing */
45
46/* Remove from hw execution round robin */
47void clear_work_bit(struct s5p_mfc_ctx *ctx)
48{
49	struct s5p_mfc_dev *dev = ctx->dev;
50
51	spin_lock(&dev->condlock);
52	__clear_bit(ctx->num, &dev->ctx_work_bits);
53	spin_unlock(&dev->condlock);
54}
55
56/* Add to hw execution round robin */
57void set_work_bit(struct s5p_mfc_ctx *ctx)
58{
59	struct s5p_mfc_dev *dev = ctx->dev;
60
61	spin_lock(&dev->condlock);
62	__set_bit(ctx->num, &dev->ctx_work_bits);
63	spin_unlock(&dev->condlock);
64}
65
66/* Remove from hw execution round robin */
67void clear_work_bit_irqsave(struct s5p_mfc_ctx *ctx)
68{
69	struct s5p_mfc_dev *dev = ctx->dev;
70	unsigned long flags;
71
72	spin_lock_irqsave(&dev->condlock, flags);
73	__clear_bit(ctx->num, &dev->ctx_work_bits);
74	spin_unlock_irqrestore(&dev->condlock, flags);
75}
76
77/* Add to hw execution round robin */
78void set_work_bit_irqsave(struct s5p_mfc_ctx *ctx)
79{
80	struct s5p_mfc_dev *dev = ctx->dev;
81	unsigned long flags;
82
83	spin_lock_irqsave(&dev->condlock, flags);
84	__set_bit(ctx->num, &dev->ctx_work_bits);
85	spin_unlock_irqrestore(&dev->condlock, flags);
86}
87
88/* Wake up context wait_queue */
89static void wake_up_ctx(struct s5p_mfc_ctx *ctx, unsigned int reason,
90			unsigned int err)
91{
92	ctx->int_cond = 1;
93	ctx->int_type = reason;
94	ctx->int_err = err;
95	wake_up(&ctx->queue);
96}
97
98/* Wake up device wait_queue */
99static void wake_up_dev(struct s5p_mfc_dev *dev, unsigned int reason,
100			unsigned int err)
101{
102	dev->int_cond = 1;
103	dev->int_type = reason;
104	dev->int_err = err;
105	wake_up(&dev->queue);
106}
107
/*
 * Watchdog timer callback (timer/softirq context), re-armed on every
 * expiry.  While the hardware is busy (hw_lock bit set) the miss counter
 * is incremented; s5p_mfc_irq() resets it on every interrupt, so reaching
 * MFC_WATCHDOG_CNT means no interrupt arrived for several consecutive
 * periods and the recovery worker is scheduled.
 */
static void s5p_mfc_watchdog(unsigned long arg)
{
	struct s5p_mfc_dev *dev = (struct s5p_mfc_dev *)arg;

	if (test_bit(0, &dev->hw_lock))
		atomic_inc(&dev->watchdog_cnt);
	if (atomic_read(&dev->watchdog_cnt) >= MFC_WATCHDOG_CNT) {
		/* This means that hw is busy and no interrupts were
		 * generated by hw for the Nth time of running this
		 * watchdog timer. This usually means a serious hw
		 * error. Now it is time to kill all instances and
		 * reset the MFC. */
		mfc_err("Time out during waiting for HW\n");
		queue_work(dev->watchdog_workqueue, &dev->watchdog_work);
	}
	/* Re-arm: this timer runs for the whole lifetime of the instance */
	dev->watchdog_timer.expires = jiffies +
					msecs_to_jiffies(MFC_WATCHDOG_INTERVAL);
	add_timer(&dev->watchdog_timer);
}
127
/*
 * Recovery worker scheduled by the watchdog timer when the hardware is
 * deemed hung: force every open context into the error state, return all
 * queued buffers to userspace, and - if at least one instance is still
 * open - reload the firmware and re-initialise the hardware.
 */
static void s5p_mfc_watchdog_worker(struct work_struct *work)
{
	struct s5p_mfc_dev *dev;
	struct s5p_mfc_ctx *ctx;
	unsigned long flags;
	int mutex_locked;
	int i, ret;

	dev = container_of(work, struct s5p_mfc_dev, watchdog_work);

	mfc_err("Driver timeout error handling\n");
	/* Lock the mutex that protects open and release.
	 * This is necessary as they may load and unload firmware. */
	mutex_locked = mutex_trylock(&dev->mfc_mutex);
	if (!mutex_locked)
		mfc_err("Error: some instance may be closing/opening\n");
	spin_lock_irqsave(&dev->irqlock, flags);

	/* NOTE(review): called with irqlock held - confirm that
	 * s5p_mfc_clock_off() cannot sleep in this configuration. */
	s5p_mfc_clock_off();

	for (i = 0; i < MFC_NUM_CONTEXTS; i++) {
		ctx = dev->ctx[i];
		if (!ctx)
			continue;
		/* Invalidate the context and give all of its buffers back */
		ctx->state = MFCINST_ERROR;
		s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->dst_queue,
				&ctx->vq_dst);
		s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->src_queue,
				&ctx->vq_src);
		clear_work_bit(ctx);
		wake_up_ctx(ctx, S5P_MFC_R2H_CMD_ERR_RET, 0);
	}
	clear_bit(0, &dev->hw_lock);
	spin_unlock_irqrestore(&dev->irqlock, flags);
	/* Double check if there is at least one instance running.
	 * If no instance is in memory then no firmware should be present */
	if (dev->num_inst > 0) {
		ret = s5p_mfc_reload_firmware(dev);
		if (ret) {
			mfc_err("Failed to reload FW\n");
			goto unlock;
		}
		s5p_mfc_clock_on();
		ret = s5p_mfc_init_hw(dev);
		if (ret)
			mfc_err("Failed to reinit FW\n");
	}
unlock:
	if (mutex_locked)
		mutex_unlock(&dev->mfc_mutex);
}
179
180static enum s5p_mfc_node_type s5p_mfc_get_node_type(struct file *file)
181{
182	struct video_device *vdev = video_devdata(file);
183
184	if (!vdev) {
185		mfc_err("failed to get video_device");
186		return MFCNODE_INVALID;
187	}
188	if (vdev->index == 0)
189		return MFCNODE_DECODER;
190	else if (vdev->index == 1)
191		return MFCNODE_ENCODER;
192	return MFCNODE_INVALID;
193}
194
/* Acknowledge a pending interrupt by clearing the RISC<->host mailbox
 * registers directly (v5-style register layout). */
static void s5p_mfc_clear_int_flags(struct s5p_mfc_dev *dev)
{
	mfc_write(dev, 0, S5P_FIMV_RISC_HOST_INT);
	mfc_write(dev, 0, S5P_FIMV_RISC2HOST_CMD);
	mfc_write(dev, 0xffff, S5P_FIMV_SI_RTN_CHID);
}
201
/*
 * All frames have been extracted from the decoder (end of stream or
 * resolution-change flush): return every queued destination buffer to
 * userspace with zero payload so the application can detect the end of
 * the sequence.  Called under dev->irqlock from s5p_mfc_handle_frame().
 */
static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_buf *dst_buf;
	struct s5p_mfc_dev *dev = ctx->dev;

	ctx->state = MFCINST_FINISHED;
	ctx->sequence++;
	while (!list_empty(&ctx->dst_queue)) {
		dst_buf = list_entry(ctx->dst_queue.next,
				     struct s5p_mfc_buf, list);
		mfc_debug(2, "Cleaning up buffer: %d\n",
					  dst_buf->b->v4l2_buf.index);
		/* Zero payload marks the buffer as carrying no frame data */
		vb2_set_plane_payload(dst_buf->b, 0, 0);
		vb2_set_plane_payload(dst_buf->b, 1, 0);
		list_del(&dst_buf->list);
		ctx->dst_queue_cnt--;
		dst_buf->b->v4l2_buf.sequence = (ctx->sequence++);

		if (s5p_mfc_hw_call(dev->mfc_ops, get_pic_type_top, ctx) ==
			s5p_mfc_hw_call(dev->mfc_ops, get_pic_type_bot, ctx))
			dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE;
		else
			dst_buf->b->v4l2_buf.field = V4L2_FIELD_INTERLACED;

		/* The buffer is no longer owned by the decoder */
		ctx->dec_dst_flag &= ~(1 << dst_buf->b->v4l2_buf.index);
		vb2_buffer_done(dst_buf->b, VB2_BUF_STATE_DONE);
	}
}
230
/*
 * Copy timestamp/timecode from the source buffer currently being decoded
 * to the destination buffer whose luma DMA address matches the decoded
 * picture, and translate the hw frame type into V4L2 key/P/B flags.
 * Called under dev->irqlock from s5p_mfc_handle_frame().
 * NOTE(review): assumes src_queue is non-empty when called - confirm
 * against the callers' display-status checks.
 */
static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	struct s5p_mfc_buf  *dst_buf, *src_buf;
	size_t dec_y_addr;
	unsigned int frame_type;

	dec_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
	frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);

	/* Copy timestamp / timecode from decoded src to dst and set
	   appropriate flags */
	src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
	list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
		if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dec_y_addr) {
			memcpy(&dst_buf->b->v4l2_buf.timecode,
				&src_buf->b->v4l2_buf.timecode,
				sizeof(struct v4l2_timecode));
			memcpy(&dst_buf->b->v4l2_buf.timestamp,
				&src_buf->b->v4l2_buf.timestamp,
				sizeof(struct timeval));
			switch (frame_type) {
			case S5P_FIMV_DECODE_FRAME_I_FRAME:
				dst_buf->b->v4l2_buf.flags |=
						V4L2_BUF_FLAG_KEYFRAME;
				break;
			case S5P_FIMV_DECODE_FRAME_P_FRAME:
				dst_buf->b->v4l2_buf.flags |=
						V4L2_BUF_FLAG_PFRAME;
				break;
			case S5P_FIMV_DECODE_FRAME_B_FRAME:
				dst_buf->b->v4l2_buf.flags |=
						V4L2_BUF_FLAG_BFRAME;
				break;
			}
			/* Only one dst buffer matches the decoded address */
			break;
		}
	}
}
270
/*
 * A decoded frame is ready for display: locate the destination buffer
 * whose luma DMA address matches the hw-reported display address, fill
 * in its metadata and return it to userspace.  Skipped frames only
 * advance the sequence counter (unless they follow a packed-PB re-run).
 * Called under dev->irqlock from s5p_mfc_handle_frame().
 */
static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	struct s5p_mfc_buf  *dst_buf;
	size_t dspl_y_addr;
	unsigned int frame_type;

	dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
	frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_disp_frame_type, ctx);

	/* If frame is same as previous then skip and do not dequeue */
	if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED) {
		if (!ctx->after_packed_pb)
			ctx->sequence++;
		ctx->after_packed_pb = 0;
		return;
	}
	ctx->sequence++;
	/* The MFC returns address of the buffer, now we have to
	 * check which videobuf does it correspond to */
	list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
		/* Check if this is the buffer we're looking for */
		if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dspl_y_addr) {
			list_del(&dst_buf->list);
			ctx->dst_queue_cnt--;
			dst_buf->b->v4l2_buf.sequence = ctx->sequence;
			/* Equal top/bottom picture types mean a progressive
			 * frame; otherwise report it as interlaced */
			if (s5p_mfc_hw_call(dev->mfc_ops,
					get_pic_type_top, ctx) ==
				s5p_mfc_hw_call(dev->mfc_ops,
					get_pic_type_bot, ctx))
				dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE;
			else
				dst_buf->b->v4l2_buf.field =
							V4L2_FIELD_INTERLACED;
			vb2_set_plane_payload(dst_buf->b, 0, ctx->luma_size);
			vb2_set_plane_payload(dst_buf->b, 1, ctx->chroma_size);
			/* Decoder no longer owns this index */
			clear_bit(dst_buf->b->v4l2_buf.index,
							&ctx->dec_dst_flag);

			vb2_buffer_done(dst_buf->b,
				err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);

			break;
		}
	}
}
317
/*
 * Frame decode interrupt handling: read the display status from the
 * hardware, deal with mid-stream resolution changes, hand finished
 * frames to userspace and decide whether the current source buffer must
 * be run through the decoder again (packed PB streams).  On every exit
 * path the hw_lock is released, the clock gated off and try_run kicked.
 */
static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
					unsigned int reason, unsigned int err)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned int dst_frame_status;
	struct s5p_mfc_buf *src_buf;
	unsigned long flags;
	unsigned int res_change;

	dst_frame_status = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_status, dev)
				& S5P_FIMV_DEC_STATUS_DECODING_STATUS_MASK;
	res_change = (s5p_mfc_hw_call(dev->mfc_ops, get_dspl_status, dev)
				& S5P_FIMV_DEC_STATUS_RESOLUTION_MASK)
				>> S5P_FIMV_DEC_STATUS_RESOLUTION_SHIFT;
	mfc_debug(2, "Frame Status: %x\n", dst_frame_status);
	if (ctx->state == MFCINST_RES_CHANGE_INIT)
		ctx->state = MFCINST_RES_CHANGE_FLUSH;
	if (res_change == S5P_FIMV_RES_INCREASE ||
		res_change == S5P_FIMV_RES_DECREASE) {
		/* Resolution change: restart the state machine and let
		 * try_run drive the flush sequence */
		ctx->state = MFCINST_RES_CHANGE_INIT;
		s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
		wake_up_ctx(ctx, reason, err);
		if (test_and_clear_bit(0, &dev->hw_lock) == 0)
			BUG();
		s5p_mfc_clock_off();
		s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
		return;
	}
	if (ctx->dpb_flush_flag)
		ctx->dpb_flush_flag = 0;

	spin_lock_irqsave(&dev->irqlock, flags);
	/* All frames remaining in the buffer have been extracted  */
	if (dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_EMPTY) {
		if (ctx->state == MFCINST_RES_CHANGE_FLUSH) {
			s5p_mfc_handle_frame_all_extracted(ctx);
			ctx->state = MFCINST_RES_CHANGE_END;
			goto leave_handle_frame;
		} else {
			s5p_mfc_handle_frame_all_extracted(ctx);
		}
	}

	if (dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_DISPLAY ||
		dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_ONLY)
		s5p_mfc_handle_frame_copy_time(ctx);

	/* A frame has been decoded and is in the buffer  */
	if (dst_frame_status == S5P_FIMV_DEC_STATUS_DISPLAY_ONLY ||
	    dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_DISPLAY) {
		s5p_mfc_handle_frame_new(ctx, err);
	} else {
		mfc_debug(2, "No frame decode\n");
	}
	/* Mark source buffer as complete */
	if (dst_frame_status != S5P_FIMV_DEC_STATUS_DISPLAY_ONLY
		&& !list_empty(&ctx->src_queue)) {
		src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
								list);
		ctx->consumed_stream += s5p_mfc_hw_call(dev->mfc_ops,
						get_consumed_stream, dev);
		/* Packed PB: more than STUFF_BYTE bytes left over means
		 * another picture is present in the same buffer */
		if (ctx->codec_mode != S5P_MFC_CODEC_H264_DEC &&
			ctx->consumed_stream + STUFF_BYTE <
			src_buf->b->v4l2_planes[0].bytesused) {
			/* Run MFC again on the same buffer */
			mfc_debug(2, "Running again the same buffer\n");
			ctx->after_packed_pb = 1;
		} else {
			mfc_debug(2, "MFC needs next buffer\n");
			ctx->consumed_stream = 0;
			if (src_buf->flags & MFC_BUF_FLAG_EOS)
				ctx->state = MFCINST_FINISHING;
			list_del(&src_buf->list);
			ctx->src_queue_cnt--;
			if (s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) > 0)
				vb2_buffer_done(src_buf->b, VB2_BUF_STATE_ERROR);
			else
				vb2_buffer_done(src_buf->b, VB2_BUF_STATE_DONE);
		}
	}
leave_handle_frame:
	spin_unlock_irqrestore(&dev->irqlock, flags);
	/* Nothing left to decode, or no room for output: drop out of the
	 * hw scheduling round robin */
	if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING)
				    || ctx->dst_queue_cnt < ctx->dpb_count)
		clear_work_bit(ctx);
	s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
	wake_up_ctx(ctx, reason, err);
	if (test_and_clear_bit(0, &dev->hw_lock) == 0)
		BUG();
	s5p_mfc_clock_off();
	s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
}
411
412/* Error handling for interrupt */
413static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev,
414		struct s5p_mfc_ctx *ctx, unsigned int reason, unsigned int err)
415{
416	unsigned long flags;
417
418	mfc_err("Interrupt Error: %08x\n", err);
419
420	if (ctx != NULL) {
421		/* Error recovery is dependent on the state of context */
422		switch (ctx->state) {
423		case MFCINST_RES_CHANGE_INIT:
424		case MFCINST_RES_CHANGE_FLUSH:
425		case MFCINST_RES_CHANGE_END:
426		case MFCINST_FINISHING:
427		case MFCINST_FINISHED:
428		case MFCINST_RUNNING:
429			/* It is higly probable that an error occured
430			 * while decoding a frame */
431			clear_work_bit(ctx);
432			ctx->state = MFCINST_ERROR;
433			/* Mark all dst buffers as having an error */
434			spin_lock_irqsave(&dev->irqlock, flags);
435			s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue,
436						&ctx->dst_queue, &ctx->vq_dst);
437			/* Mark all src buffers as having an error */
438			s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue,
439						&ctx->src_queue, &ctx->vq_src);
440			spin_unlock_irqrestore(&dev->irqlock, flags);
441			wake_up_ctx(ctx, reason, err);
442			break;
443		default:
444			clear_work_bit(ctx);
445			ctx->state = MFCINST_ERROR;
446			wake_up_ctx(ctx, reason, err);
447			break;
448		}
449	}
450	if (test_and_clear_bit(0, &dev->hw_lock) == 0)
451		BUG();
452	s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
453	s5p_mfc_clock_off();
454	wake_up_dev(dev, reason, err);
455	return;
456}
457
/* Header parsing (SEQ_DONE) interrupt handling */
static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
				 unsigned int reason, unsigned int err)
{
	struct s5p_mfc_dev *dev;

	if (ctx == NULL)
		return;
	dev = ctx->dev;
	if (ctx->c_ops->post_seq_start) {
		if (ctx->c_ops->post_seq_start(ctx))
			mfc_err("post_seq_start() failed\n");
	} else {
		/* Read the parsed stream geometry from the hardware */
		ctx->img_width = s5p_mfc_hw_call(dev->mfc_ops, get_img_width,
				dev);
		ctx->img_height = s5p_mfc_hw_call(dev->mfc_ops, get_img_height,
				dev);

		s5p_mfc_hw_call(dev->mfc_ops, dec_calc_dpb_size, ctx);

		ctx->dpb_count = s5p_mfc_hw_call(dev->mfc_ops, get_dpb_count,
				dev);
		ctx->mv_count = s5p_mfc_hw_call(dev->mfc_ops, get_mv_count,
				dev);
		/* A zero dimension means the header could not be parsed */
		if (ctx->img_width == 0 || ctx->img_height == 0)
			ctx->state = MFCINST_ERROR;
		else
			ctx->state = MFCINST_HEAD_PARSED;

		/* For H264 the header may share a buffer with frame data;
		 * remember whether the whole buffer was consumed */
		if ((ctx->codec_mode == S5P_MFC_CODEC_H264_DEC ||
			ctx->codec_mode == S5P_MFC_CODEC_H264_MVC_DEC) &&
				!list_empty(&ctx->src_queue)) {
			struct s5p_mfc_buf *src_buf;
			src_buf = list_entry(ctx->src_queue.next,
					struct s5p_mfc_buf, list);
			if (s5p_mfc_hw_call(dev->mfc_ops, get_consumed_stream,
						dev) <
					src_buf->b->v4l2_planes[0].bytesused)
				ctx->head_processed = 0;
			else
				ctx->head_processed = 1;
		} else {
			ctx->head_processed = 1;
		}
	}
	s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
	clear_work_bit(ctx);
	if (test_and_clear_bit(0, &dev->hw_lock) == 0)
		BUG();
	s5p_mfc_clock_off();
	s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
	wake_up_ctx(ctx, reason, err);
}
511
512/* Header parsing interrupt handling */
513static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx,
514				 unsigned int reason, unsigned int err)
515{
516	struct s5p_mfc_buf *src_buf;
517	struct s5p_mfc_dev *dev;
518	unsigned long flags;
519
520	if (ctx == NULL)
521		return;
522	dev = ctx->dev;
523	s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
524	ctx->int_type = reason;
525	ctx->int_err = err;
526	ctx->int_cond = 1;
527	clear_work_bit(ctx);
528	if (err == 0) {
529		ctx->state = MFCINST_RUNNING;
530		if (!ctx->dpb_flush_flag && ctx->head_processed) {
531			spin_lock_irqsave(&dev->irqlock, flags);
532			if (!list_empty(&ctx->src_queue)) {
533				src_buf = list_entry(ctx->src_queue.next,
534					     struct s5p_mfc_buf, list);
535				list_del(&src_buf->list);
536				ctx->src_queue_cnt--;
537				vb2_buffer_done(src_buf->b,
538						VB2_BUF_STATE_DONE);
539			}
540			spin_unlock_irqrestore(&dev->irqlock, flags);
541		} else {
542			ctx->dpb_flush_flag = 0;
543		}
544		if (test_and_clear_bit(0, &dev->hw_lock) == 0)
545			BUG();
546
547		s5p_mfc_clock_off();
548
549		wake_up(&ctx->queue);
550		s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
551	} else {
552		if (test_and_clear_bit(0, &dev->hw_lock) == 0)
553			BUG();
554
555		s5p_mfc_clock_off();
556
557		wake_up(&ctx->queue);
558	}
559}
560
/*
 * Encoder stream-complete (COMPLETE_SEQ) interrupt handling: mark the
 * context finished and return the pending destination buffer (with zero
 * payload) to userspace.
 */
static void s5p_mfc_handle_stream_complete(struct s5p_mfc_ctx *ctx,
				 unsigned int reason, unsigned int err)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	struct s5p_mfc_buf *mb_entry;

	mfc_debug(2, "Stream completed");

	s5p_mfc_clear_int_flags(dev);
	ctx->int_type = reason;
	ctx->int_err = err;
	ctx->state = MFCINST_FINISHED;

	spin_lock(&dev->irqlock);
	if (!list_empty(&ctx->dst_queue)) {
		mb_entry = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf,
									list);
		list_del(&mb_entry->list);
		ctx->dst_queue_cnt--;
		/* No encoded data: return the buffer with zero payload */
		vb2_set_plane_payload(mb_entry->b, 0, 0);
		vb2_buffer_done(mb_entry->b, VB2_BUF_STATE_DONE);
	}
	spin_unlock(&dev->irqlock);

	clear_work_bit(ctx);

	WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);

	s5p_mfc_clock_off();
	wake_up(&ctx->queue);
	s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
}
593
594/* Interrupt processing */
595static irqreturn_t s5p_mfc_irq(int irq, void *priv)
596{
597	struct s5p_mfc_dev *dev = priv;
598	struct s5p_mfc_ctx *ctx;
599	unsigned int reason;
600	unsigned int err;
601
602	mfc_debug_enter();
603	/* Reset the timeout watchdog */
604	atomic_set(&dev->watchdog_cnt, 0);
605	ctx = dev->ctx[dev->curr_ctx];
606	/* Get the reason of interrupt and the error code */
607	reason = s5p_mfc_hw_call(dev->mfc_ops, get_int_reason, dev);
608	err = s5p_mfc_hw_call(dev->mfc_ops, get_int_err, dev);
609	mfc_debug(1, "Int reason: %d (err: %08x)\n", reason, err);
610	switch (reason) {
611	case S5P_MFC_R2H_CMD_ERR_RET:
612		/* An error has occured */
613		if (ctx->state == MFCINST_RUNNING &&
614			s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) >=
615				dev->warn_start)
616			s5p_mfc_handle_frame(ctx, reason, err);
617		else
618			s5p_mfc_handle_error(dev, ctx, reason, err);
619		clear_bit(0, &dev->enter_suspend);
620		break;
621
622	case S5P_MFC_R2H_CMD_SLICE_DONE_RET:
623	case S5P_MFC_R2H_CMD_FIELD_DONE_RET:
624	case S5P_MFC_R2H_CMD_FRAME_DONE_RET:
625		if (ctx->c_ops->post_frame_start) {
626			if (ctx->c_ops->post_frame_start(ctx))
627				mfc_err("post_frame_start() failed\n");
628			s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
629			wake_up_ctx(ctx, reason, err);
630			if (test_and_clear_bit(0, &dev->hw_lock) == 0)
631				BUG();
632			s5p_mfc_clock_off();
633			s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
634		} else {
635			s5p_mfc_handle_frame(ctx, reason, err);
636		}
637		break;
638
639	case S5P_MFC_R2H_CMD_SEQ_DONE_RET:
640		s5p_mfc_handle_seq_done(ctx, reason, err);
641		break;
642
643	case S5P_MFC_R2H_CMD_OPEN_INSTANCE_RET:
644		ctx->inst_no = s5p_mfc_hw_call(dev->mfc_ops, get_inst_no, dev);
645		ctx->state = MFCINST_GOT_INST;
646		clear_work_bit(ctx);
647		wake_up(&ctx->queue);
648		goto irq_cleanup_hw;
649
650	case S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET:
651		clear_work_bit(ctx);
652		ctx->state = MFCINST_FREE;
653		wake_up(&ctx->queue);
654		goto irq_cleanup_hw;
655
656	case S5P_MFC_R2H_CMD_SYS_INIT_RET:
657	case S5P_MFC_R2H_CMD_FW_STATUS_RET:
658	case S5P_MFC_R2H_CMD_SLEEP_RET:
659	case S5P_MFC_R2H_CMD_WAKEUP_RET:
660		if (ctx)
661			clear_work_bit(ctx);
662		s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
663		wake_up_dev(dev, reason, err);
664		clear_bit(0, &dev->hw_lock);
665		clear_bit(0, &dev->enter_suspend);
666		break;
667
668	case S5P_MFC_R2H_CMD_INIT_BUFFERS_RET:
669		s5p_mfc_handle_init_buffers(ctx, reason, err);
670		break;
671
672	case S5P_MFC_R2H_CMD_COMPLETE_SEQ_RET:
673		s5p_mfc_handle_stream_complete(ctx, reason, err);
674		break;
675
676	case S5P_MFC_R2H_CMD_DPB_FLUSH_RET:
677		clear_work_bit(ctx);
678		ctx->state = MFCINST_RUNNING;
679		wake_up(&ctx->queue);
680		goto irq_cleanup_hw;
681
682	default:
683		mfc_debug(2, "Unknown int reason\n");
684		s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
685	}
686	mfc_debug_leave();
687	return IRQ_HANDLED;
688irq_cleanup_hw:
689	s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
690	ctx->int_type = reason;
691	ctx->int_err = err;
692	ctx->int_cond = 1;
693	if (test_and_clear_bit(0, &dev->hw_lock) == 0)
694		mfc_err("Failed to unlock hw\n");
695
696	s5p_mfc_clock_off();
697
698	s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
699	mfc_debug(2, "Exit via irq_cleanup_hw\n");
700	return IRQ_HANDLED;
701}
702
703/* Open an MFC node */
704static int s5p_mfc_open(struct file *file)
705{
706	struct s5p_mfc_dev *dev = video_drvdata(file);
707	struct s5p_mfc_ctx *ctx = NULL;
708	struct vb2_queue *q;
709	int ret = 0;
710
711	mfc_debug_enter();
712	if (mutex_lock_interruptible(&dev->mfc_mutex))
713		return -ERESTARTSYS;
714	dev->num_inst++;	/* It is guarded by mfc_mutex in vfd */
715	/* Allocate memory for context */
716	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
717	if (!ctx) {
718		mfc_err("Not enough memory\n");
719		ret = -ENOMEM;
720		goto err_alloc;
721	}
722	v4l2_fh_init(&ctx->fh, video_devdata(file));
723	file->private_data = &ctx->fh;
724	v4l2_fh_add(&ctx->fh);
725	ctx->dev = dev;
726	INIT_LIST_HEAD(&ctx->src_queue);
727	INIT_LIST_HEAD(&ctx->dst_queue);
728	ctx->src_queue_cnt = 0;
729	ctx->dst_queue_cnt = 0;
730	/* Get context number */
731	ctx->num = 0;
732	while (dev->ctx[ctx->num]) {
733		ctx->num++;
734		if (ctx->num >= MFC_NUM_CONTEXTS) {
735			mfc_err("Too many open contexts\n");
736			ret = -EBUSY;
737			goto err_no_ctx;
738		}
739	}
740	/* Mark context as idle */
741	clear_work_bit_irqsave(ctx);
742	dev->ctx[ctx->num] = ctx;
743	if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
744		ctx->type = MFCINST_DECODER;
745		ctx->c_ops = get_dec_codec_ops();
746		s5p_mfc_dec_init(ctx);
747		/* Setup ctrl handler */
748		ret = s5p_mfc_dec_ctrls_setup(ctx);
749		if (ret) {
750			mfc_err("Failed to setup mfc controls\n");
751			goto err_ctrls_setup;
752		}
753	} else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
754		ctx->type = MFCINST_ENCODER;
755		ctx->c_ops = get_enc_codec_ops();
756		/* only for encoder */
757		INIT_LIST_HEAD(&ctx->ref_queue);
758		ctx->ref_queue_cnt = 0;
759		s5p_mfc_enc_init(ctx);
760		/* Setup ctrl handler */
761		ret = s5p_mfc_enc_ctrls_setup(ctx);
762		if (ret) {
763			mfc_err("Failed to setup mfc controls\n");
764			goto err_ctrls_setup;
765		}
766	} else {
767		ret = -ENOENT;
768		goto err_bad_node;
769	}
770	ctx->fh.ctrl_handler = &ctx->ctrl_handler;
771	ctx->inst_no = -1;
772	/* Load firmware if this is the first instance */
773	if (dev->num_inst == 1) {
774		dev->watchdog_timer.expires = jiffies +
775					msecs_to_jiffies(MFC_WATCHDOG_INTERVAL);
776		add_timer(&dev->watchdog_timer);
777		ret = s5p_mfc_power_on();
778		if (ret < 0) {
779			mfc_err("power on failed\n");
780			goto err_pwr_enable;
781		}
782		s5p_mfc_clock_on();
783		ret = s5p_mfc_load_firmware(dev);
784		if (ret) {
785			s5p_mfc_clock_off();
786			goto err_load_fw;
787		}
788		/* Init the FW */
789		ret = s5p_mfc_init_hw(dev);
790		s5p_mfc_clock_off();
791		if (ret)
792			goto err_init_hw;
793	}
794	/* Init videobuf2 queue for CAPTURE */
795	q = &ctx->vq_dst;
796	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
797	q->drv_priv = &ctx->fh;
798	if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
799		q->io_modes = VB2_MMAP;
800		q->ops = get_dec_queue_ops();
801	} else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
802		q->io_modes = VB2_MMAP | VB2_USERPTR;
803		q->ops = get_enc_queue_ops();
804	} else {
805		ret = -ENOENT;
806		goto err_queue_init;
807	}
808	q->mem_ops = (struct vb2_mem_ops *)&vb2_dma_contig_memops;
809	q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
810	ret = vb2_queue_init(q);
811	if (ret) {
812		mfc_err("Failed to initialize videobuf2 queue(capture)\n");
813		goto err_queue_init;
814	}
815	/* Init videobuf2 queue for OUTPUT */
816	q = &ctx->vq_src;
817	q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
818	q->io_modes = VB2_MMAP;
819	q->drv_priv = &ctx->fh;
820	if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
821		q->io_modes = VB2_MMAP;
822		q->ops = get_dec_queue_ops();
823	} else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
824		q->io_modes = VB2_MMAP | VB2_USERPTR;
825		q->ops = get_enc_queue_ops();
826	} else {
827		ret = -ENOENT;
828		goto err_queue_init;
829	}
830	q->mem_ops = (struct vb2_mem_ops *)&vb2_dma_contig_memops;
831	q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
832	ret = vb2_queue_init(q);
833	if (ret) {
834		mfc_err("Failed to initialize videobuf2 queue(output)\n");
835		goto err_queue_init;
836	}
837	init_waitqueue_head(&ctx->queue);
838	mutex_unlock(&dev->mfc_mutex);
839	mfc_debug_leave();
840	return ret;
841	/* Deinit when failure occured */
842err_queue_init:
843	if (dev->num_inst == 1)
844		s5p_mfc_deinit_hw(dev);
845err_init_hw:
846err_load_fw:
847err_pwr_enable:
848	if (dev->num_inst == 1) {
849		if (s5p_mfc_power_off() < 0)
850			mfc_err("power off failed\n");
851		del_timer_sync(&dev->watchdog_timer);
852	}
853err_ctrls_setup:
854	s5p_mfc_dec_ctrls_delete(ctx);
855err_bad_node:
856	dev->ctx[ctx->num] = NULL;
857err_no_ctx:
858	v4l2_fh_del(&ctx->fh);
859	v4l2_fh_exit(&ctx->fh);
860	kfree(ctx);
861err_alloc:
862	dev->num_inst--;
863	mutex_unlock(&dev->mfc_mutex);
864	mfc_debug_leave();
865	return ret;
866}
867
/*
 * Release an MFC context: tear down the vb2 queues, return the hardware
 * codec instance (waiting for the close command to complete), free the
 * per-context hardware buffers, and power the device down when the last
 * instance goes away.
 */
static int s5p_mfc_release(struct file *file)
{
	struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
	struct s5p_mfc_dev *dev = ctx->dev;

	mfc_debug_enter();
	mutex_lock(&dev->mfc_mutex);
	s5p_mfc_clock_on();
	vb2_queue_release(&ctx->vq_src);
	vb2_queue_release(&ctx->vq_dst);
	/* Mark context as idle */
	clear_work_bit_irqsave(ctx);
	/* If instance was initialised then
	 * return instance and free resources */
	if (ctx->inst_no != MFC_NO_INSTANCE_SET) {
		mfc_debug(2, "Has to free instance\n");
		ctx->state = MFCINST_RETURN_INST;
		set_work_bit_irqsave(ctx);
		s5p_mfc_clean_ctx_int_flags(ctx);
		/* Kick the scheduler so the close command gets issued */
		s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
		/* Wait until instance is returned or timeout occurred */
		if (s5p_mfc_wait_for_done_ctx
		    (ctx, S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET, 0)) {
			s5p_mfc_clock_off();
			mfc_err("Err returning instance\n");
		}
		mfc_debug(2, "After free instance\n");
		/* Free resources */
		s5p_mfc_hw_call(dev->mfc_ops, release_codec_buffers, ctx);
		s5p_mfc_hw_call(dev->mfc_ops, release_instance_buffer, ctx);
		if (ctx->type == MFCINST_DECODER)
			s5p_mfc_hw_call(dev->mfc_ops, release_dec_desc_buffer,
					ctx);

		ctx->inst_no = MFC_NO_INSTANCE_SET;
	}
	/* hardware locking scheme */
	if (dev->curr_ctx == ctx->num)
		clear_bit(0, &dev->hw_lock);
	dev->num_inst--;
	if (dev->num_inst == 0) {
		mfc_debug(2, "Last instance\n");
		/* Last user gone: shut the hardware down completely */
		s5p_mfc_deinit_hw(dev);
		del_timer_sync(&dev->watchdog_timer);
		if (s5p_mfc_power_off() < 0)
			mfc_err("Power off failed\n");
	}
	mfc_debug(2, "Shutting down clock\n");
	s5p_mfc_clock_off();
	dev->ctx[ctx->num] = NULL;
	s5p_mfc_dec_ctrls_delete(ctx);
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);
	mfc_debug_leave();
	mutex_unlock(&dev->mfc_mutex);
	return 0;
}
927
/*
 * Poll: report POLLPRI for pending V4L2 events, POLLOUT when a source
 * buffer is done and POLLIN when a destination buffer is done.  The
 * device mutex is dropped around poll_wait() so waiting does not block
 * other file operations.
 */
static unsigned int s5p_mfc_poll(struct file *file,
				 struct poll_table_struct *wait)
{
	struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
	struct s5p_mfc_dev *dev = ctx->dev;
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	unsigned int rc = 0;
	unsigned long flags;

	mutex_lock(&dev->mfc_mutex);
	src_q = &ctx->vq_src;
	dst_q = &ctx->vq_dst;
	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
		&& (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		rc = POLLERR;
		goto end;
	}
	mutex_unlock(&dev->mfc_mutex);
	poll_wait(file, &ctx->fh.wait, wait);
	poll_wait(file, &src_q->done_wq, wait);
	poll_wait(file, &dst_q->done_wq, wait);
	mutex_lock(&dev->mfc_mutex);
	if (v4l2_event_pending(&ctx->fh))
		rc |= POLLPRI;
	/* A finished (or errored) source buffer makes the node writable */
	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
								done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
				|| src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLOUT | POLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);
	/* A finished (or errored) destination buffer makes it readable */
	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
								done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
				|| dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLIN | POLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);
end:
	mutex_unlock(&dev->mfc_mutex);
	return rc;
}
979
980/* Mmap */
981static int s5p_mfc_mmap(struct file *file, struct vm_area_struct *vma)
982{
983	struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
984	struct s5p_mfc_dev *dev = ctx->dev;
985	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
986	int ret;
987
988	if (mutex_lock_interruptible(&dev->mfc_mutex))
989		return -ERESTARTSYS;
990	if (offset < DST_QUEUE_OFF_BASE) {
991		mfc_debug(2, "mmaping source\n");
992		ret = vb2_mmap(&ctx->vq_src, vma);
993	} else {		/* capture */
994		mfc_debug(2, "mmaping destination\n");
995		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
996		ret = vb2_mmap(&ctx->vq_dst, vma);
997	}
998	mutex_unlock(&dev->mfc_mutex);
999	return ret;
1000}
1001
/* v4l2 file operations, shared by the decoder and encoder video nodes */
static const struct v4l2_file_operations s5p_mfc_fops = {
	.owner = THIS_MODULE,
	.open = s5p_mfc_open,
	.release = s5p_mfc_release,
	.poll = s5p_mfc_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap = s5p_mfc_mmap,
};
1011
1012static int match_child(struct device *dev, void *data)
1013{
1014	if (!dev_name(dev))
1015		return 0;
1016	return !strcmp(dev_name(dev), (char *)data);
1017}
1018
1019static void *mfc_get_drv_data(struct platform_device *pdev);
1020
1021static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
1022{
1023	unsigned int mem_info[2] = { };
1024
1025	dev->mem_dev_l = devm_kzalloc(&dev->plat_dev->dev,
1026			sizeof(struct device), GFP_KERNEL);
1027	if (!dev->mem_dev_l) {
1028		mfc_err("Not enough memory\n");
1029		return -ENOMEM;
1030	}
1031	device_initialize(dev->mem_dev_l);
1032	of_property_read_u32_array(dev->plat_dev->dev.of_node,
1033			"samsung,mfc-l", mem_info, 2);
1034	if (dma_declare_coherent_memory(dev->mem_dev_l, mem_info[0],
1035				mem_info[0], mem_info[1],
1036				DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE) == 0) {
1037		mfc_err("Failed to declare coherent memory for\n"
1038		"MFC device\n");
1039		return -ENOMEM;
1040	}
1041
1042	dev->mem_dev_r = devm_kzalloc(&dev->plat_dev->dev,
1043			sizeof(struct device), GFP_KERNEL);
1044	if (!dev->mem_dev_r) {
1045		mfc_err("Not enough memory\n");
1046		return -ENOMEM;
1047	}
1048	device_initialize(dev->mem_dev_r);
1049	of_property_read_u32_array(dev->plat_dev->dev.of_node,
1050			"samsung,mfc-r", mem_info, 2);
1051	if (dma_declare_coherent_memory(dev->mem_dev_r, mem_info[0],
1052				mem_info[0], mem_info[1],
1053				DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE) == 0) {
1054		pr_err("Failed to declare coherent memory for\n"
1055		"MFC device\n");
1056		return -ENOMEM;
1057	}
1058	return 0;
1059}
1060
1061/* MFC probe function */
1062static int s5p_mfc_probe(struct platform_device *pdev)
1063{
1064	struct s5p_mfc_dev *dev;
1065	struct video_device *vfd;
1066	struct resource *res;
1067	int ret;
1068
1069	pr_debug("%s++\n", __func__);
1070	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
1071	if (!dev) {
1072		dev_err(&pdev->dev, "Not enough memory for MFC device\n");
1073		return -ENOMEM;
1074	}
1075
1076	spin_lock_init(&dev->irqlock);
1077	spin_lock_init(&dev->condlock);
1078	dev->plat_dev = pdev;
1079	if (!dev->plat_dev) {
1080		dev_err(&pdev->dev, "No platform data specified\n");
1081		return -ENODEV;
1082	}
1083
1084	dev->variant = mfc_get_drv_data(pdev);
1085
1086	ret = s5p_mfc_init_pm(dev);
1087	if (ret < 0) {
1088		dev_err(&pdev->dev, "failed to get mfc clock source\n");
1089		return ret;
1090	}
1091
1092	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1093
1094	dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
1095	if (IS_ERR(dev->regs_base))
1096		return PTR_ERR(dev->regs_base);
1097
1098	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1099	if (res == NULL) {
1100		dev_err(&pdev->dev, "failed to get irq resource\n");
1101		ret = -ENOENT;
1102		goto err_res;
1103	}
1104	dev->irq = res->start;
1105	ret = devm_request_irq(&pdev->dev, dev->irq, s5p_mfc_irq,
1106					IRQF_DISABLED, pdev->name, dev);
1107	if (ret) {
1108		dev_err(&pdev->dev, "Failed to install irq (%d)\n", ret);
1109		goto err_res;
1110	}
1111
1112	if (pdev->dev.of_node) {
1113		if (s5p_mfc_alloc_memdevs(dev) < 0)
1114			goto err_res;
1115	} else {
1116		dev->mem_dev_l = device_find_child(&dev->plat_dev->dev,
1117				"s5p-mfc-l", match_child);
1118		if (!dev->mem_dev_l) {
1119			mfc_err("Mem child (L) device get failed\n");
1120			ret = -ENODEV;
1121			goto err_res;
1122		}
1123		dev->mem_dev_r = device_find_child(&dev->plat_dev->dev,
1124				"s5p-mfc-r", match_child);
1125		if (!dev->mem_dev_r) {
1126			mfc_err("Mem child (R) device get failed\n");
1127			ret = -ENODEV;
1128			goto err_res;
1129		}
1130	}
1131
1132	dev->alloc_ctx[0] = vb2_dma_contig_init_ctx(dev->mem_dev_l);
1133	if (IS_ERR(dev->alloc_ctx[0])) {
1134		ret = PTR_ERR(dev->alloc_ctx[0]);
1135		goto err_res;
1136	}
1137	dev->alloc_ctx[1] = vb2_dma_contig_init_ctx(dev->mem_dev_r);
1138	if (IS_ERR(dev->alloc_ctx[1])) {
1139		ret = PTR_ERR(dev->alloc_ctx[1]);
1140		goto err_mem_init_ctx_1;
1141	}
1142
1143	mutex_init(&dev->mfc_mutex);
1144
1145	ret = s5p_mfc_alloc_firmware(dev);
1146	if (ret)
1147		goto err_alloc_fw;
1148
1149	ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
1150	if (ret)
1151		goto err_v4l2_dev_reg;
1152	init_waitqueue_head(&dev->queue);
1153
1154	/* decoder */
1155	vfd = video_device_alloc();
1156	if (!vfd) {
1157		v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
1158		ret = -ENOMEM;
1159		goto err_dec_alloc;
1160	}
1161	vfd->fops	= &s5p_mfc_fops,
1162	vfd->ioctl_ops	= get_dec_v4l2_ioctl_ops();
1163	vfd->release	= video_device_release,
1164	vfd->lock	= &dev->mfc_mutex;
1165	vfd->v4l2_dev	= &dev->v4l2_dev;
1166	vfd->vfl_dir	= VFL_DIR_M2M;
1167	snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_DEC_NAME);
1168	dev->vfd_dec	= vfd;
1169	ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
1170	if (ret) {
1171		v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
1172		video_device_release(vfd);
1173		goto err_dec_reg;
1174	}
1175	v4l2_info(&dev->v4l2_dev,
1176		  "decoder registered as /dev/video%d\n", vfd->num);
1177	video_set_drvdata(vfd, dev);
1178
1179	/* encoder */
1180	vfd = video_device_alloc();
1181	if (!vfd) {
1182		v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
1183		ret = -ENOMEM;
1184		goto err_enc_alloc;
1185	}
1186	vfd->fops	= &s5p_mfc_fops,
1187	vfd->ioctl_ops	= get_enc_v4l2_ioctl_ops();
1188	vfd->release	= video_device_release,
1189	vfd->lock	= &dev->mfc_mutex;
1190	vfd->v4l2_dev	= &dev->v4l2_dev;
1191	vfd->vfl_dir	= VFL_DIR_M2M;
1192	snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_ENC_NAME);
1193	dev->vfd_enc	= vfd;
1194	ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
1195	if (ret) {
1196		v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
1197		video_device_release(vfd);
1198		goto err_enc_reg;
1199	}
1200	v4l2_info(&dev->v4l2_dev,
1201		  "encoder registered as /dev/video%d\n", vfd->num);
1202	video_set_drvdata(vfd, dev);
1203	platform_set_drvdata(pdev, dev);
1204
1205	dev->hw_lock = 0;
1206	dev->watchdog_workqueue = create_singlethread_workqueue(S5P_MFC_NAME);
1207	INIT_WORK(&dev->watchdog_work, s5p_mfc_watchdog_worker);
1208	atomic_set(&dev->watchdog_cnt, 0);
1209	init_timer(&dev->watchdog_timer);
1210	dev->watchdog_timer.data = (unsigned long)dev;
1211	dev->watchdog_timer.function = s5p_mfc_watchdog;
1212
1213	/* Initialize HW ops and commands based on MFC version */
1214	s5p_mfc_init_hw_ops(dev);
1215	s5p_mfc_init_hw_cmds(dev);
1216
1217	pr_debug("%s--\n", __func__);
1218	return 0;
1219
1220/* Deinit MFC if probe had failed */
1221err_enc_reg:
1222	video_device_release(dev->vfd_enc);
1223err_enc_alloc:
1224	video_unregister_device(dev->vfd_dec);
1225err_dec_reg:
1226	video_device_release(dev->vfd_dec);
1227err_dec_alloc:
1228	v4l2_device_unregister(&dev->v4l2_dev);
1229err_v4l2_dev_reg:
1230	s5p_mfc_release_firmware(dev);
1231err_alloc_fw:
1232	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[1]);
1233err_mem_init_ctx_1:
1234	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[0]);
1235err_res:
1236	s5p_mfc_final_pm(dev);
1237
1238	pr_debug("%s-- with error\n", __func__);
1239	return ret;
1240
1241}
1242
/* Remove the driver: tear everything down in reverse order of probe. */
static int s5p_mfc_remove(struct platform_device *pdev)
{
	struct s5p_mfc_dev *dev = platform_get_drvdata(pdev);

	v4l2_info(&dev->v4l2_dev, "Removing %s\n", pdev->name);

	/* Quiesce the watchdog before unregistering anything it may touch */
	del_timer_sync(&dev->watchdog_timer);
	flush_workqueue(dev->watchdog_workqueue);
	destroy_workqueue(dev->watchdog_workqueue);

	/* Unregistering drops the last reference; vfd->release frees them */
	video_unregister_device(dev->vfd_enc);
	video_unregister_device(dev->vfd_dec);
	v4l2_device_unregister(&dev->v4l2_dev);
	s5p_mfc_release_firmware(dev);
	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[0]);
	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[1]);
	if (pdev->dev.of_node) {
		/* memdevs were created by s5p_mfc_alloc_memdevs() (DT boot
		 * only); drop the references taken by device_initialize() */
		put_device(dev->mem_dev_l);
		put_device(dev->mem_dev_r);
	}

	s5p_mfc_final_pm(dev);
	return 0;
}
1268
1269#ifdef CONFIG_PM_SLEEP
1270
/*
 * System suspend: wait (with timeout) for the hardware to go idle, then
 * save its state via s5p_mfc_sleep().  A no-op when no codec instance
 * is open.
 */
static int s5p_mfc_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
	int ret;

	if (m_dev->num_inst == 0)
		return 0;

	/* Reject nested suspend requests */
	if (test_and_set_bit(0, &m_dev->enter_suspend) != 0) {
		mfc_err("Error: going to suspend for a second time\n");
		return -EIO;
	}

	/* Check if we're processing then wait if it necessary. */
	while (test_and_set_bit(0, &m_dev->hw_lock) != 0) {
		/* Try and lock the HW */
		/* Wait on the interrupt waitqueue */
		ret = wait_event_interruptible_timeout(m_dev->queue,
			m_dev->int_cond || m_dev->ctx[m_dev->curr_ctx]->int_cond,
			msecs_to_jiffies(MFC_INT_TIMEOUT));

		/* ret == 0 means timeout; a negative value (signal) makes the
		 * loop retry the hw_lock acquisition */
		if (ret == 0) {
			mfc_err("Waiting for hardware to finish timed out\n");
			/* NOTE(review): enter_suspend stays set on this path
			 * and hw_lock was not acquired — confirm the unwind
			 * expectations of the caller/resume side. */
			return -EIO;
		}
	}

	return s5p_mfc_sleep(m_dev);
}
1301
1302static int s5p_mfc_resume(struct device *dev)
1303{
1304	struct platform_device *pdev = to_platform_device(dev);
1305	struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
1306
1307	if (m_dev->num_inst == 0)
1308		return 0;
1309	return s5p_mfc_wakeup(m_dev);
1310}
1311#endif
1312
1313#ifdef CONFIG_PM_RUNTIME
1314static int s5p_mfc_runtime_suspend(struct device *dev)
1315{
1316	struct platform_device *pdev = to_platform_device(dev);
1317	struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
1318
1319	atomic_set(&m_dev->pm.power, 0);
1320	return 0;
1321}
1322
1323static int s5p_mfc_runtime_resume(struct device *dev)
1324{
1325	struct platform_device *pdev = to_platform_device(dev);
1326	struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
1327	int pre_power;
1328
1329	if (!m_dev->alloc_ctx)
1330		return 0;
1331	pre_power = atomic_read(&m_dev->pm.power);
1332	atomic_set(&m_dev->pm.power, 1);
1333	return 0;
1334}
1335#endif
1336
1337/* Power management */
static const struct dev_pm_ops s5p_mfc_pm_ops = {
	/* system sleep saves/restores HW state via s5p_mfc_sleep/wakeup */
	SET_SYSTEM_SLEEP_PM_OPS(s5p_mfc_suspend, s5p_mfc_resume)
	/* runtime PM only tracks the power state in pm.power */
	SET_RUNTIME_PM_OPS(s5p_mfc_runtime_suspend, s5p_mfc_runtime_resume,
			   NULL)
};
1343
/* Per-context buffer sizes for MFC v5 hardware */
struct s5p_mfc_buf_size_v5 mfc_buf_size_v5 = {
	.h264_ctx	= MFC_H264_CTX_BUF_SIZE,
	.non_h264_ctx	= MFC_CTX_BUF_SIZE,
	.dsc		= DESC_BUF_SIZE,
	.shm		= SHARED_BUF_SIZE,
};

/* Firmware/CPB buffer limits for v5; .priv points at the v5 sizes above */
struct s5p_mfc_buf_size buf_size_v5 = {
	.fw	= MAX_FW_SIZE,
	.cpb	= MAX_CPB_SIZE,
	.priv	= &mfc_buf_size_v5,
};

/* Base buffer alignment (expressed as an order) required by v5 */
struct s5p_mfc_buf_align mfc_buf_align_v5 = {
	.base = MFC_BASE_ALIGN_ORDER,
};

/* Driver data describing the MFC v5 hardware variant */
static struct s5p_mfc_variant mfc_drvdata_v5 = {
	.version	= MFC_VERSION,
	.port_num	= MFC_NUM_PORTS,
	.buf_size	= &buf_size_v5,
	.buf_align	= &mfc_buf_align_v5,
	.mclk_name	= "sclk_mfc",
	.fw_name	= "s5p-mfc.fw",
};
1369
/* Per-context buffer sizes for MFC v6 hardware */
struct s5p_mfc_buf_size_v6 mfc_buf_size_v6 = {
	.dev_ctx	= MFC_CTX_BUF_SIZE_V6,
	.h264_dec_ctx	= MFC_H264_DEC_CTX_BUF_SIZE_V6,
	.other_dec_ctx	= MFC_OTHER_DEC_CTX_BUF_SIZE_V6,
	.h264_enc_ctx	= MFC_H264_ENC_CTX_BUF_SIZE_V6,
	.other_enc_ctx	= MFC_OTHER_ENC_CTX_BUF_SIZE_V6,
};

/* Firmware/CPB buffer limits for v6; .priv points at the v6 sizes above */
struct s5p_mfc_buf_size buf_size_v6 = {
	.fw	= MAX_FW_SIZE_V6,
	.cpb	= MAX_CPB_SIZE_V6,
	.priv	= &mfc_buf_size_v6,
};

/* v6 has no extra base alignment requirement */
struct s5p_mfc_buf_align mfc_buf_align_v6 = {
	.base = 0,
};

/* Driver data describing the MFC v6 hardware variant */
static struct s5p_mfc_variant mfc_drvdata_v6 = {
	.version	= MFC_VERSION_V6,
	.port_num	= MFC_NUM_PORTS_V6,
	.buf_size	= &buf_size_v6,
	.buf_align	= &mfc_buf_align_v6,
	.mclk_name      = "aclk_333",
	.fw_name        = "s5p-mfc-v6.fw",
};
1396
/* Non-DT (board-file) device ids; driver_data selects the HW variant */
static struct platform_device_id mfc_driver_ids[] = {
	{
		.name = "s5p-mfc",	/* legacy name, same as v5 */
		.driver_data = (unsigned long)&mfc_drvdata_v5,
	}, {
		.name = "s5p-mfc-v5",
		.driver_data = (unsigned long)&mfc_drvdata_v5,
	}, {
		.name = "s5p-mfc-v6",
		.driver_data = (unsigned long)&mfc_drvdata_v6,
	},
	{},
};
MODULE_DEVICE_TABLE(platform, mfc_driver_ids);
1411
/* Device-tree match table; .data selects the HW variant */
static const struct of_device_id exynos_mfc_match[] = {
	{
		.compatible = "samsung,mfc-v5",
		.data = &mfc_drvdata_v5,
	}, {
		.compatible = "samsung,mfc-v6",
		.data = &mfc_drvdata_v6,
	},
	{},
};
MODULE_DEVICE_TABLE(of, exynos_mfc_match);
1423
1424static void *mfc_get_drv_data(struct platform_device *pdev)
1425{
1426	struct s5p_mfc_variant *driver_data = NULL;
1427
1428	if (pdev->dev.of_node) {
1429		const struct of_device_id *match;
1430		match = of_match_node(of_match_ptr(exynos_mfc_match),
1431				pdev->dev.of_node);
1432		if (match)
1433			driver_data = (struct s5p_mfc_variant *)match->data;
1434	} else {
1435		driver_data = (struct s5p_mfc_variant *)
1436			platform_get_device_id(pdev)->driver_data;
1437	}
1438	return driver_data;
1439}
1440
/* Platform driver glue: matches by DT compatible or platform device id */
static struct platform_driver s5p_mfc_driver = {
	.probe		= s5p_mfc_probe,
	.remove		= s5p_mfc_remove,
	.id_table	= mfc_driver_ids,
	.driver	= {
		.name	= S5P_MFC_NAME,
		.owner	= THIS_MODULE,
		.pm	= &s5p_mfc_pm_ops,
		.of_match_table = exynos_mfc_match,
	},
};
1452
1453module_platform_driver(s5p_mfc_driver);
1454
1455MODULE_LICENSE("GPL");
1456MODULE_AUTHOR("Kamil Debski <k.debski@samsung.com>");
1457MODULE_DESCRIPTION("Samsung S5P Multi Format Codec V4L2 driver");
1458
1459